// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright Altera Corporation (C) 2012-2015
 */

#include <log.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include <hang.h>
#include "sequencer.h"

static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs
	= (struct socfpga_sdr_rw_load_jump_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static const struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
		(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
		(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static const struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static const struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & seq->skip_delay_mask)
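
/*
 * Example: with seq->skip_delay_mask == 0xff (delay loops enabled),
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) keeps the full 0x6a counter value;
 * with the mask cleared to 0 for CALIB_SKIP_DELAY_LOOPS, the same call
 * yields 0 and the delay loops fall through immediately.
 */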

bool dram_is_ddr(const u8 ddr)
{
	const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config();
	const u8 type = (cfg->ctrl_cfg >> SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB) &
			SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK;

	if (ddr == 2 && type == 1)	/* DDR2 */
		return true;

	if (ddr == 3 && type == 2)	/* DDR3 */
		return true;

	return false;
}
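
/*
 * Example: with the MEMTYPE field in ctrl_cfg reading 2 (DDR3),
 * dram_is_ddr(3) returns true and dram_is_ddr(2) returns false; the init
 * and ODT code below uses this to pick the DDR2 or DDR3 sequence.
 */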

static void set_failing_group_stage(struct socfpga_sdrseq *seq,
				    u32 group, u32 stage, u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (seq->gbl.error_stage == CAL_STAGE_NIL) {
		seq->gbl.error_substage = substage;
		seq->gbl.error_stage = stage;
		seq->gbl.error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(struct socfpga_sdrseq *seq)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER: memory clock is not stable, we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER: calibration status all set to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = seq->rwcfg->mem_dq_per_read_dqs /
		seq->rwcfg->mem_virtual_groups_per_read_dqs;
	seq->param.read_correct_mask_vg = (1 << ratio) - 1;
	seq->param.write_correct_mask_vg = (1 << ratio) - 1;
	seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs)
		- 1;
	seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs)
		- 1;
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank index
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq,
				  const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (seq->rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
				if (dram_is_ddr(2))
					odt_mask_1 &= ~(1 << rank);
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * DDR3 Read, DDR2 Read/Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 *           +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * DDR3 Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x4;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x8;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x1;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x2;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
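
/*
 * Worked example of the value written above: bits [7:0] carry the CS mask
 * (the active rank's bit is cleared), bits [15:8] odt_mask_0 (read ODT),
 * bits [23:16] odt_mask_1 (write ODT). For rank 1 of a 4-rank DDR3 setup:
 * 0xFD | (0x8 << 8) | (0xA << 16) = 0x0A08FD.
 */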

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
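
/*
 * Note on addressing: each group's register is one 32-bit word, so the
 * group index is shifted left by 2 to form a byte offset. E.g. group 3 of
 * SCC_MGR_DQS_IN_DELAY_OFFSET lands at base | off | 0xc.
 */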

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq,
					u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm,
				    u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq,
				       u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm,
				      u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq,
				  const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq,
						     u32 write_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq,
				       const u32 write_group, const u32 delay)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
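
/*
 * Example of the read/write group ratio above (illustrative numbers, not a
 * specific device configuration): with 8 read DQS groups and 4 write DQS
 * groups, ratio = 2 and write group 3 maps to read groups 6 and 7, so the
 * OCT delay is written twice.
 */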

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}
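
/*
 * Sanity check on the bit map above: the constant evaluates to
 * (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27, matching the
 * field encoding in the comment.
 */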

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(struct socfpga_sdrseq *seq)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i,
						     seq->iocfg->dqs_in_reserve);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(seq, i,
						   seq->iocfg->dqs_out_reserve);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq,
					     const u32 write_group)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(struct socfpga_sdrseq *seq,
			       const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			if (!out_only)
				scc_mgr_set_dm_in_delay(seq, i, 0);
			scc_mgr_set_dm_out1_delay(seq, i, 0);
		}

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(seq, 0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve);
		scc_mgr_set_oct_out1_delay(seq, write_group,
					   seq->iocfg->dqs_out_reserve);
		scc_mgr_load_dqs_for_write_group(seq, write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq,
					    u32 group_bgn, u32 delay)
{
	u32 i, p;

	for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs;
	     i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the
 * DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq,
					      const u32 delay)
{
	int i;

	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq,
					      u32 delay1)
{
	u32 i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(seq, i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq,
						    u32 write_group, u32 delay)
{
	scc_mgr_set_dqs_out1_delay(seq, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(seq, write_group, delay);
	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output
 * side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq,
						  const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_dqs_out1_delay(seq, new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_oct_out1_delay(seq, write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the
 * entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq,
						const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom could be
 * applied to other protocols if we wanted to
 */
static void set_jump_as_return(struct socfpga_sdrseq *seq)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(seq->rwcfg->rreturn,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @clocks:	Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq,
				   const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;

	/*
	 * rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(seq->rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
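
/*
 * Worked example of the counter split above: clocks = 512 with
 * afi_rate_ratio = 2 gives afi_clocks = 256 - 1 = 255. Then c_loop = 0,
 * outer = 0 and inner = 255, so the single-loop branch programs cntr1
 * with 255 and the loop body executes 256 times (test-at-end semantics).
 */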

static void delay_for_n_ns(struct socfpga_sdrseq *seq, const u32 ns)
{
	delay_for_n_mem_clocks(seq, (ns * seq->misccfg->afi_clk_freq *
				     seq->misccfg->afi_rate_ratio) / 1000);
}
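
/*
 * Conversion note (assuming afi_clk_freq is in MHz, as the /1000 scaling
 * suggests): clocks = ns * f_mem / 1000, where f_mem [MHz] = afi_clk_freq *
 * afi_rate_ratio; e.g. 400 ns at an 800 MHz memory clock is 320 clocks.
 */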

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq,
				      u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user_ddr2() - Load user calibration values for DDR2
 * @handoff:	Indicate whether this is initialization or handoff phase
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr2(struct socfpga_sdrseq *seq,
				      const int handoff)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->emr2, grpaddr);
		writel(seq->rwcfg->emr3, grpaddr);
		writel(seq->rwcfg->emr, grpaddr);

		if (handoff) {
			writel(seq->rwcfg->mr_user, grpaddr);
			continue;
		}

		writel(seq->rwcfg->mr_dll_reset, grpaddr);

		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);
		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);

		writel(seq->rwcfg->mr_calib, grpaddr);
		writel(/*seq->rwcfg->*/0x0b, grpaddr); // EMR_OCD_ENABLE
		writel(seq->rwcfg->emr, grpaddr);
		delay_for_n_mem_clocks(seq, 200);
	}
}
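
/*
 * The sequence above appears to follow the JEDEC DDR2 power-up flow:
 * precharge, EMR2/EMR3/EMR loads, mode-register write with DLL reset,
 * another precharge, two refreshes, then the calibration MR write and the
 * EMR OCD-enable/exit pair before normal operation.
 */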

/**
 * rw_mgr_mem_load_user_ddr3() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr3(struct socfpga_sdrseq *seq,
				      const u32 fin1, const u32 fin2,
				      const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(seq->rwcfg->precharge_all, grpaddr);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1, grpaddr);
			set_jump_as_return(seq);
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return(seq);
		writel(seq->rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(seq, 512);
	}
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq,
				 const u32 fin1, const u32 fin2,
				 const int precharge)
{
	if (dram_is_ddr(2))
		rw_mgr_mem_load_user_ddr2(seq, precharge);
	else if (dram_is_ddr(3))
		rw_mgr_mem_load_user_ddr3(seq, fin1, fin2, precharge);
	else
		hang();
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	if (dram_is_ddr(3)) {
		writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
	}

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iteration in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instruction in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
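	/* Check: ((2 + 0) * 256 + 2) * 106 = 54484 cycles >= 54000. */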
	rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val,
				  seq->misccfg->tinit_cntr1_val,
				  seq->misccfg->tinit_cntr2_val,
				  seq->rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	if (dram_is_ddr(2)) {
		writel(seq->rwcfg->nop, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		/* Bring up clock enable. */

		/* tXRP < 400 ck cycles */
		delay_for_n_ns(seq, 400);
	} else if (dram_is_ddr(3)) {
		/*
		 * transition the RESET to high
		 * Wait for 500us
		 */

		/*
		 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
		 * If a and b are the number of iteration in 2 nested loops
		 * it takes the following number of cycles to complete the
		 * operation number_of_cycles = ((2 + n) * a + 2) * b
		 * where n is the number of instruction in the inner loop
		 * One possible solution is
		 * n = 2 , a = 131 , b = 256 => a = 83, b = FF
		 */
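		/* Check: ((2 + 2) * 131 + 2) * 256 = 134656 cycles >= 134000. */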
		rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val,
					  seq->misccfg->treset_cntr1_val,
					  seq->misccfg->treset_cntr2_val,
					  seq->rwcfg->init_reset_1_cke_0);
		/* Bring up clock enable. */

		/* tXRP < 250 ck cycles */
		delay_for_n_mem_clocks(seq, 250);
	}

	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr,
			     seq->rwcfg->mrs0_dll_reset, 0);
}

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq)
{
	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr,
			     seq->rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff so it's okay.
	 */
}
1139
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001140/**
1141 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
1142 * @group: Write Group
1143 * @use_dm: Use DM
1144 *
1145 * Issue write test command. Two variants are provided, one that just tests
1146 * a write pattern and another that tests datamask functionality.
Marek Vasut0b97c422015-07-21 05:43:37 +02001147 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001148static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq,
1149 u32 group, u32 test_dm)
Marek Vasut0b97c422015-07-21 05:43:37 +02001150{
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001151 const u32 quick_write_mode =
1152 (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001153 seq->misccfg->enable_super_quick_calibration;
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001154 u32 mcc_instruction;
1155 u32 rw_wl_nop_cycles;
Marek Vasut0b97c422015-07-21 05:43:37 +02001156
1157 /*
1158 * Set counter and jump addresses for the right
1159 * number of NOP cycles.
1160 * The number of supported NOP cycles can range from -1 to infinity
1161 * Three different cases are handled:
1162 *
1163 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
1164 * mechanism will be used to insert the right number of NOPs
1165 *
1166 * 2. For a number of NOP cycles equal to 0, the micro-instruction
1167 * issuing the write command will jump straight to the
1168 * micro-instruction that turns on DQS (for DDRx), or outputs write
1169 * data (for RLD), skipping
1170 * the NOP micro-instruction altogether
1171 *
1172 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
1173 * turned on in the same micro-instruction that issues the write
1174 * command. Then we need
1175 * to directly jump to the micro-instruction that sends out the data
1176 *
1177 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
1178 * (2 and 3). One jump-counter (0) is used to perform multiple
1179 * write-read operations.
1180 * One counter is left to issue this command in "multiple-group" mode.
1181 */
1182
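	/*
	 * Summary of the three cases handled below (derived from the
	 * comment above; the counter values are the ones this function
	 * programs, not datasheet figures):
	 *   rw_wl_nop_cycles == -1: CNTR2 = 0xFF, jump past the NOP to
	 *                           the instruction sending out the data
	 *   rw_wl_nop_cycles ==  0: CNTR2 = 0xFF, jump straight to the
	 *                           DQS enable instruction
	 *   rw_wl_nop_cycles  >  0: CNTR2 = 0, CNTR3 = rw_wl_nop_cycles - 1
	 *                           to loop over the NOP instruction
	 */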
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001183 rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles;
Marek Vasut0b97c422015-07-21 05:43:37 +02001184
1185 if (rw_wl_nop_cycles == -1) {
1186 /*
1187 * CNTR 2 - We want to execute the special write operation that
1188 * turns on DQS right away and then skip directly to the
1189 * instruction that sends out the data. We set the counter to a
1190 * large number so that the jump is always taken.
1191 */
1192 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1193
1194 /* CNTR 3 - Not used */
1195 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001196 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
1197 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data,
Marek Vasut0b97c422015-07-21 05:43:37 +02001198 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001199 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasut0b97c422015-07-21 05:43:37 +02001200 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1201 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001202 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1;
1203 writel(seq->rwcfg->lfsr_wr_rd_bank_0_data,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001204 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001205 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001206 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001207 }
1208 } else if (rw_wl_nop_cycles == 0) {
1209 /*
1210 * CNTR 2 - We want to skip the NOP operation and go straight
1211 * to the DQS enable instruction. We set the counter to a large
1212 * number so that the jump is always taken.
1213 */
1214 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1215
1216 /* CNTR 3 - Not used */
1217 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001218 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1219 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
Marek Vasut0b97c422015-07-21 05:43:37 +02001220 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1221 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001222 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1223 writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001224 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Marek Vasut0b97c422015-07-21 05:43:37 +02001225 }
1226 } else {
1227 /*
1228 * CNTR 2 - In this case we want to execute the next instruction
1229 * and NOT take the jump. So we set the counter to 0. The jump
1230 * address doesn't count.
1231 */
1232 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
1233 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1234
1235 /*
1236 * CNTR 3 - Set the nop counter to the number of cycles we
1237 * need to loop for, minus 1.
1238 */
1239 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
1240 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001241 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1242 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001243 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001244 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001245 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1246 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001247 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001248 }
1249 }
1250
1251 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1252 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1253
1254 if (quick_write_mode)
1255 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
1256 else
1257 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
1258
1259 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1260
1261 /*
1262 * CNTR 1 - This is used to ensure enough time elapses
1263 * for read data to come back.
1264 */
1265 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
1266
1267 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001268 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001269 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasut0b97c422015-07-21 05:43:37 +02001270 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001271 writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001272 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasut0b97c422015-07-21 05:43:37 +02001273 }
1274
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001275 writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
1276 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
1277 (group << 2));
Marek Vasut0b97c422015-07-21 05:43:37 +02001278}
1279
Marek Vasutc67d9622015-07-21 05:57:11 +02001280/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001281 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple
1282 * pass
Marek Vasutc67d9622015-07-21 05:57:11 +02001283 * @rank_bgn: Rank number
1284 * @write_group: Write Group
1285 * @use_dm: Use DM
1286 * @all_correct: All bits must be correct in the mask
1287 * @bit_chk: Resulting bit mask after the test
1288 * @all_ranks: Test all ranks
1289 *
1290 * Test writes; can check for a single-bit pass or a multiple-bit pass.
1291 */
Marek Vasutbc773a12015-07-21 05:54:39 +02001292static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001293rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq,
1294 const u32 rank_bgn, const u32 write_group,
Marek Vasutbc773a12015-07-21 05:54:39 +02001295 const u32 use_dm, const u32 all_correct,
1296 u32 *bit_chk, const u32 all_ranks)
Marek Vasut0b97c422015-07-21 05:43:37 +02001297{
Marek Vasutbc773a12015-07-21 05:54:39 +02001298 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001299 seq->rwcfg->mem_number_of_ranks :
Marek Vasutbc773a12015-07-21 05:54:39 +02001300 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001301 const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs /
1302 seq->rwcfg->mem_virtual_groups_per_write_dqs;
1303 const u32 correct_mask_vg = seq->param.write_correct_mask_vg;
Marek Vasutbc773a12015-07-21 05:54:39 +02001304
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001305 u32 tmp_bit_chk, base_rw_mgr, group;
Marek Vasutbc773a12015-07-21 05:54:39 +02001306 int vg, r;
Marek Vasut0b97c422015-07-21 05:43:37 +02001307
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001308 *bit_chk = seq->param.write_correct_mask;
Marek Vasut0b97c422015-07-21 05:43:37 +02001309
1310 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutbc773a12015-07-21 05:54:39 +02001311 /* Set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001312 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Marek Vasut0b97c422015-07-21 05:43:37 +02001313
1314 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001315 for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1;
Marek Vasutbc773a12015-07-21 05:54:39 +02001316 vg >= 0; vg--) {
1317 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut0b97c422015-07-21 05:43:37 +02001318 writel(0, &phy_mgr_cmd->fifo_reset);
1319
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001320 group = write_group *
1321 seq->rwcfg->mem_virtual_groups_per_write_dqs
1322 + vg;
1323 rw_mgr_mem_calibrate_write_test_issue(seq, group,
1324 use_dm);
Marek Vasut0b97c422015-07-21 05:43:37 +02001325
Marek Vasutbc773a12015-07-21 05:54:39 +02001326 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1327 tmp_bit_chk <<= shift_ratio;
1328 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
Marek Vasut0b97c422015-07-21 05:43:37 +02001329 }
Marek Vasutbc773a12015-07-21 05:54:39 +02001330
Marek Vasut0b97c422015-07-21 05:43:37 +02001331 *bit_chk &= tmp_bit_chk;
1332 }
1333
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001334 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasut0b97c422015-07-21 05:43:37 +02001335 if (all_correct) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001336 debug_cond(DLEVEL >= 2,
Marek Vasutbc773a12015-07-21 05:54:39 +02001337 "write_test(%u,%u,ALL) : %u == %u => %i\n",
1338 write_group, use_dm, *bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001339 seq->param.write_correct_mask,
1340 *bit_chk == seq->param.write_correct_mask);
1341 return *bit_chk == seq->param.write_correct_mask;
Marek Vasut0b97c422015-07-21 05:43:37 +02001342 } else {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001343 debug_cond(DLEVEL >= 2,
Marek Vasutbc773a12015-07-21 05:54:39 +02001344 "write_test(%u,%u,ONE) : %u != %i => %i\n",
1345 write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
Marek Vasut0b97c422015-07-21 05:43:37 +02001346 return *bit_chk != 0x00;
1347 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001348}
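
/*
 * Illustrative sketch, hypothetical helper not present in the original
 * code: how per-virtual-group results are packed into bit_chk above.
 * The RW manager status register holds error bits, so the pass bits of
 * one virtual group are (correct_mask_vg & ~base_rw_mgr). Groups are
 * visited from the highest vg down to 0, so each step shifts the
 * accumulated word left by shift_ratio before ORing in the new bits.
 */
static inline u32 example_pack_vg_bits(const u32 *base_rw_mgr_per_vg,
				       int num_vg, u32 shift_ratio,
				       u32 correct_mask_vg)
{
	u32 packed = 0;
	int vg;

	for (vg = num_vg - 1; vg >= 0; vg--) {
		packed <<= shift_ratio;
		packed |= correct_mask_vg & ~base_rw_mgr_per_vg[vg];
	}

	return packed;
}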
1349
Marek Vasut55c4d692015-07-18 03:55:07 +02001350/**
1351 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1352 * @rank_bgn: Rank number
1353 * @group: Read/Write Group
1354 * @all_ranks: Test all ranks
1355 *
1356 * Performs a guaranteed read on the patterns we are going to use during a
1357 * read test to ensure memory works.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001358 */
Marek Vasut55c4d692015-07-18 03:55:07 +02001359static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001360rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq,
1361 const u32 rank_bgn, const u32 group,
Marek Vasut55c4d692015-07-18 03:55:07 +02001362 const u32 all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001363{
Marek Vasut55c4d692015-07-18 03:55:07 +02001364 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1365 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1366 const u32 addr_offset =
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001367 (group * seq->rwcfg->mem_virtual_groups_per_read_dqs)
1368 << 2;
Marek Vasut55c4d692015-07-18 03:55:07 +02001369 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001370 seq->rwcfg->mem_number_of_ranks :
Marek Vasut55c4d692015-07-18 03:55:07 +02001371 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001372 const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs /
1373 seq->rwcfg->mem_virtual_groups_per_read_dqs;
1374 const u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001375
Marek Vasut55c4d692015-07-18 03:55:07 +02001376 u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1377 int vg, r;
1378 int ret = 0;
1379
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001380 bit_chk = seq->param.read_correct_mask;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001381
1382 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasut55c4d692015-07-18 03:55:07 +02001383 /* Set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001384 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001385
1386 /* Load up a constant burst of read commands */
Marek Vasutb5450962015-07-12 21:05:08 +02001387 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001388 writel(seq->rwcfg->guaranteed_read,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001389 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001390
Marek Vasutb5450962015-07-12 21:05:08 +02001391 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001392 writel(seq->rwcfg->guaranteed_read_cont,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001393 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001394
1395 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001396 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
Marek Vasut55c4d692015-07-18 03:55:07 +02001397 vg >= 0; vg--) {
1398 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02001399 writel(0, &phy_mgr_cmd->fifo_reset);
1400 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1401 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001402 writel(seq->rwcfg->guaranteed_read,
Marek Vasut55c4d692015-07-18 03:55:07 +02001403 addr + addr_offset + (vg << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001404
Marek Vasutb5450962015-07-12 21:05:08 +02001405 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Marek Vasut55c4d692015-07-18 03:55:07 +02001406 tmp_bit_chk <<= shift_ratio;
1407 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001408 }
Marek Vasut55c4d692015-07-18 03:55:07 +02001409
1410 bit_chk &= tmp_bit_chk;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001411 }
1412
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001413 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001414
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001415 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasut55c4d692015-07-18 03:55:07 +02001416
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001417 if (bit_chk != seq->param.read_correct_mask)
Marek Vasut55c4d692015-07-18 03:55:07 +02001418 ret = -EIO;
1419
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001420 debug_cond(DLEVEL >= 1,
Marek Vasut55c4d692015-07-18 03:55:07 +02001421 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1422 __func__, __LINE__, group, bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001423 seq->param.read_correct_mask, ret);
Marek Vasut55c4d692015-07-18 03:55:07 +02001424
1425 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001426}
1427
Marek Vasut6a752782015-07-18 03:34:22 +02001428/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001429 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read
1430 * test
Marek Vasut6a752782015-07-18 03:34:22 +02001431 * @rank_bgn: Rank number
1432 * @all_ranks: Test all ranks
1433 *
1434 * Load up the patterns we are going to use during a read test.
1435 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001436static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq,
1437 const u32 rank_bgn,
Marek Vasut6a752782015-07-18 03:34:22 +02001438 const int all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001439{
Marek Vasut6a752782015-07-18 03:34:22 +02001440 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001441 seq->rwcfg->mem_number_of_ranks :
Marek Vasut6a752782015-07-18 03:34:22 +02001442 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1443 u32 r;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001444
1445 debug("%s:%d\n", __func__, __LINE__);
Marek Vasut6a752782015-07-18 03:34:22 +02001446
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001447 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001448 /* set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001449 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001450
1451 /* Load up a constant burst */
Marek Vasutb5450962015-07-12 21:05:08 +02001452 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001453
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001454 writel(seq->rwcfg->guaranteed_write_wait0,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001455 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001456
Marek Vasutb5450962015-07-12 21:05:08 +02001457 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001458
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001459 writel(seq->rwcfg->guaranteed_write_wait1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001460 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001461
Marek Vasutb5450962015-07-12 21:05:08 +02001462 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001463
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001464 writel(seq->rwcfg->guaranteed_write_wait2,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001465 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001466
Marek Vasutb5450962015-07-12 21:05:08 +02001467 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001468
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001469 writel(seq->rwcfg->guaranteed_write_wait3,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001470 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001471
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001472 writel(seq->rwcfg->guaranteed_write,
1473 SDR_PHYGRP_RWMGRGRP_ADDRESS |
1474 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001475 }
1476
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001477 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001478}
1479
Marek Vasut656002e2015-07-20 03:26:05 +02001480/**
1481 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1482 * @rank_bgn: Rank number
1483 * @group: Read/Write group
1484 * @num_tries: Number of retries of the test
1485 * @all_correct: All bits must be correct in the mask
1486 * @bit_chk: Resulting bit mask after the test
1487 * @all_groups: Test all R/W groups
1488 * @all_ranks: Test all ranks
1489 *
1490 * Try a read and see if it returns correct data back. The test has dummy
1491 * reads inserted into the mix, used to align DQS enable, and has more
1492 * thorough checks than the regular read test.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001493 */
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001494static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001495rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq,
1496 const u32 rank_bgn, const u32 group,
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001497 const u32 num_tries, const u32 all_correct,
1498 u32 *bit_chk,
1499 const u32 all_groups, const u32 all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001500{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001501 const u32 rank_end = all_ranks ? seq->rwcfg->mem_number_of_ranks :
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001502 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001503 const u32 quick_read_mode =
1504 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001505 seq->misccfg->enable_super_quick_calibration);
1506 u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001507 u32 tmp_bit_chk;
1508 u32 base_rw_mgr;
1509 u32 addr;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001510
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001511 int r, vg, ret;
Marek Vasuta005c772015-07-19 07:44:21 +02001512
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001513 *bit_chk = seq->param.read_correct_mask;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001514
1515 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001516 /* set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001517 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001518
Marek Vasutb5450962015-07-12 21:05:08 +02001519 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001520
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001521 writel(seq->rwcfg->read_b2b_wait1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001522 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001523
Marek Vasutb5450962015-07-12 21:05:08 +02001524 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001525 writel(seq->rwcfg->read_b2b_wait2,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001526 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001527
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001528 if (quick_read_mode)
Marek Vasutb5450962015-07-12 21:05:08 +02001529 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001530 /* need at least two (1+1) reads to capture failures */
1531 else if (all_groups)
Marek Vasutb5450962015-07-12 21:05:08 +02001532 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001533 else
Marek Vasutb5450962015-07-12 21:05:08 +02001534 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001535
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001536 writel(seq->rwcfg->read_b2b,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001537 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001538 if (all_groups)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001539 writel(seq->rwcfg->mem_if_read_dqs_width *
1540 seq->rwcfg->mem_virtual_groups_per_read_dqs - 1,
Marek Vasutb5450962015-07-12 21:05:08 +02001541 &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001542 else
Marek Vasutb5450962015-07-12 21:05:08 +02001543 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001544
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001545 writel(seq->rwcfg->read_b2b,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001546 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001547
1548 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001549 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
1550 vg >= 0; vg--) {
Marek Vasut50a780f2015-07-19 07:57:28 +02001551 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02001552 writel(0, &phy_mgr_cmd->fifo_reset);
1553 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1554 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001555
Marek Vasut50a780f2015-07-19 07:57:28 +02001556 if (all_groups) {
1557 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1558 RW_MGR_RUN_ALL_GROUPS_OFFSET;
1559 } else {
1560 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1561 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1562 }
Marek Vasuta3340102015-07-12 19:03:33 +02001563
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001564 writel(seq->rwcfg->read_b2b, addr +
Marek Vasutc85b9b32015-08-02 19:47:01 +02001565 ((group *
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001566 seq->rwcfg->mem_virtual_groups_per_read_dqs +
Marek Vasutc85b9b32015-08-02 19:47:01 +02001567 vg) << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001568
Marek Vasutb5450962015-07-12 21:05:08 +02001569 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001570 tmp_bit_chk <<=
1571 seq->rwcfg->mem_dq_per_read_dqs /
1572 seq->rwcfg->mem_virtual_groups_per_read_dqs;
Marek Vasut50a780f2015-07-19 07:57:28 +02001573 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001574 }
Marek Vasut28957f32015-07-19 07:51:17 +02001575
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001576 *bit_chk &= tmp_bit_chk;
1577 }
1578
Marek Vasuta3340102015-07-12 19:03:33 +02001579 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001580 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001581
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001582 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasuta005c772015-07-19 07:44:21 +02001583
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001584 if (all_correct) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001585 ret = (*bit_chk == seq->param.read_correct_mask);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001586 debug_cond(DLEVEL >= 2,
Marek Vasuta005c772015-07-19 07:44:21 +02001587 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1588 __func__, __LINE__, group, all_groups, *bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001589 seq->param.read_correct_mask, ret);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001590 } else {
Marek Vasuta005c772015-07-19 07:44:21 +02001591 ret = (*bit_chk != 0x00);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001592 debug_cond(DLEVEL >= 2,
Marek Vasuta005c772015-07-19 07:44:21 +02001593 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1594 __func__, __LINE__, group, all_groups, *bit_chk,
1595 0, ret);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001596 }
Marek Vasuta005c772015-07-19 07:44:21 +02001597
1598 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001599}
1600
Marek Vasuta50d5d72015-07-19 07:35:36 +02001601/**
1602 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1603 * @grp: Read/Write group
1604 * @num_tries: Number of retries of the test
1605 * @all_correct: All bits must be correct in the mask
1606 * @all_groups: Test all R/W groups
1607 *
1608 * Perform a READ test across all memory ranks.
1609 */
1610static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001611rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq,
1612 const u32 grp, const u32 num_tries,
Marek Vasuta50d5d72015-07-19 07:35:36 +02001613 const u32 all_correct,
1614 const u32 all_groups)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001615{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001616 u32 bit_chk;

Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001617 return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries,
1618 all_correct, &bit_chk, all_groups,
1619 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001620}
1621
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001622/**
1623 * rw_mgr_incr_vfifo() - Increase VFIFO value
1624 * @grp: Read/Write group
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001625 *
1626 * Increase VFIFO value.
1627 */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001628static void rw_mgr_incr_vfifo(const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001629{
Marek Vasutb5450962015-07-12 21:05:08 +02001630 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001631}
1632
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001633/**
1634 * rw_mgr_decr_vfifo() - Decrease VFIFO value
1635 * @grp: Read/Write group
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001636 *
1637 * Decrease VFIFO value.
1638 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001639static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001640{
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001641 u32 i;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001642
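	/*
	 * The VFIFO pointer only steps forward, so decrementing by one is
	 * done by wrapping around: incrementing size - 1 times.
	 */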
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001643 for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++)
Marek Vasut42e43ab2015-07-19 06:37:51 +02001644 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001645}
1646
Marek Vasut088eb212015-07-19 06:45:43 +02001647/**
1648 * find_vfifo_failing_read() - Push VFIFO to get a failing read
1649 * @grp: Read/Write group
1650 *
1651 * Push VFIFO until a failing read happens.
1652 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001653static int find_vfifo_failing_read(struct socfpga_sdrseq *seq,
1654 const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001655{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001656 u32 v, ret, fail_cnt = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001657
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001658 for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001659 debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001660 __func__, __LINE__, v);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001661 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1662 PASS_ONE_BIT, 0);
Marek Vasut088eb212015-07-19 06:45:43 +02001663 if (!ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001664 fail_cnt++;
1665
1666 if (fail_cnt == 2)
Marek Vasut088eb212015-07-19 06:45:43 +02001667 return v;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001668 }
1669
Marek Vasut088eb212015-07-19 06:45:43 +02001670 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001671 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001672 }
1673
Marek Vasut088eb212015-07-19 06:45:43 +02001674 /* No failing read found! Something must have gone wrong. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001675 debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
Marek Vasut088eb212015-07-19 06:45:43 +02001676 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001677}
1678
Marek Vasutf2b02d42015-07-19 05:26:49 +02001679/**
Marek Vasut6ff36b72015-07-19 07:27:06 +02001680 * sdr_find_phase_delay() - Find DQS enable phase or delay
1681 * @working: If 1, look for working phase/delay, if 0, look for non-working
1682 * @delay: If 1, look for delay, if 0, look for phase
1683 * @grp: Read/Write group
1684 * @work: Working window position
1685 * @work_inc: Working window increment
1686 * @pd: DQS Phase/Delay Iterator
1687 *
1688 * Find working or non-working DQS enable phase setting.
1689 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001690static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working,
1691 int delay, const u32 grp, u32 *work,
1692 const u32 work_inc, u32 *pd)
Marek Vasut6ff36b72015-07-19 07:27:06 +02001693{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001694 const u32 max = delay ? seq->iocfg->dqs_en_delay_max :
1695 seq->iocfg->dqs_en_phase_max;
Marek Vasuta50d5d72015-07-19 07:35:36 +02001696 u32 ret;
Marek Vasut6ff36b72015-07-19 07:27:06 +02001697
1698 for (; *pd <= max; (*pd)++) {
1699 if (delay)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001700 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001701 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001702 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001703
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001704 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1705 PASS_ONE_BIT, 0);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001706 if (!working)
1707 ret = !ret;
1708
1709 if (ret)
1710 return 0;
1711
1712 if (work)
1713 *work += work_inc;
1714 }
1715
1716 return -EINVAL;
1717}

1718/**
Marek Vasutf2b02d42015-07-19 05:26:49 +02001719 * sdr_find_phase() - Find DQS enable phase
1720 * @working: If 1, look for working phase, if 0, look for non-working phase
1721 * @grp: Read/Write group
Marek Vasutf2b02d42015-07-19 05:26:49 +02001722 * @work: Working window position
1723 * @i: Iterator
1724 * @p: DQS Phase Iterator
Marek Vasutf2b02d42015-07-19 05:26:49 +02001725 *
1726 * Find working or non-working DQS enable phase setting.
1727 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001728static int sdr_find_phase(struct socfpga_sdrseq *seq, int working,
1729 const u32 grp, u32 *work, u32 *i, u32 *p)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001730{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001731 const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001732 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001733
Marek Vasutf2b02d42015-07-19 05:26:49 +02001734 for (; *i < end; (*i)++) {
1735 if (working)
1736 *p = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001737
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001738 ret = sdr_find_phase_delay(seq, working, 0, grp, work,
1739 seq->iocfg->delay_per_opa_tap, p);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001740 if (!ret)
1741 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001742
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001743 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasutf2b02d42015-07-19 05:26:49 +02001744 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001745 rw_mgr_incr_vfifo(grp);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001746 if (!working)
1747 *p = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001748 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001749 }
1750
Marek Vasutf2b02d42015-07-19 05:26:49 +02001751 return -EINVAL;
1752}
1753
Marek Vasut6394ef52015-07-19 06:04:00 +02001754/**
1755 * sdr_working_phase() - Find working DQS enable phase
1756 * @grp: Read/Write group
1757 * @work_bgn: Working window start position
Marek Vasut6394ef52015-07-19 06:04:00 +02001758 * @d: dtaps output value
1759 * @p: DQS Phase Iterator
1760 * @i: Iterator
1761 *
1762 * Find working DQS enable phase setting.
1763 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001764static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp,
1765 u32 *work_bgn, u32 *d, u32 *p, u32 *i)
Marek Vasutf2b02d42015-07-19 05:26:49 +02001766{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001767 const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1768 seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutf2b02d42015-07-19 05:26:49 +02001769 int ret;
1770
1771 *work_bgn = 0;
1772
1773 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1774 *i = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001775 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d);
1776 ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001777 if (!ret)
1778 return 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001779 *work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutf2b02d42015-07-19 05:26:49 +02001780 }
1781
Marek Vasutb148ebe2015-07-19 05:01:12 +02001782 /* Cannot find working solution */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001783 debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
Marek Vasutf2b02d42015-07-19 05:26:49 +02001784 __func__, __LINE__);
1785 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001786}
1787
Marek Vasut6394ef52015-07-19 06:04:00 +02001788/**
1789 * sdr_backup_phase() - Find DQS enable backup phase
1790 * @grp: Read/Write group
1791 * @work_bgn: Working window start position
Marek Vasut6394ef52015-07-19 06:04:00 +02001792 * @p: DQS Phase Iterator
1793 *
1794 * Find DQS enable backup phase setting.
1795 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001796static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp,
1797 u32 *work_bgn, u32 *p)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001798{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001799 u32 tmp_delay, d;
Marek Vasut6394ef52015-07-19 06:04:00 +02001800 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001801
1802 /* Special case code for backing up a phase */
1803 if (*p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001804 *p = seq->iocfg->dqs_en_phase_max;
1805 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001806 } else {
1807 (*p)--;
1808 }
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001809 tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap;
1810 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001811
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001812 for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
Marek Vasutc85b9b32015-08-02 19:47:01 +02001813 d++) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001814 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001815
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001816 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1817 PASS_ONE_BIT, 0);
Marek Vasut6394ef52015-07-19 06:04:00 +02001818 if (ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001819 *work_bgn = tmp_delay;
1820 break;
1821 }
Marek Vasut6eff8032015-07-19 05:48:30 +02001822
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001823 tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001824 }
1825
Marek Vasut6394ef52015-07-19 06:04:00 +02001826 /* Restore VFIFO to old state before we decremented it (if needed). */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001827 (*p)++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001828 if (*p > seq->iocfg->dqs_en_phase_max) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001829 *p = 0;
Marek Vasut42e43ab2015-07-19 06:37:51 +02001830 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001831 }
1832
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001833 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001834}
1835
Marek Vasut6394ef52015-07-19 06:04:00 +02001836/**
1837 * sdr_nonworking_phase() - Find non-working DQS enable phase
1838 * @grp: Read/Write group
1839 * @work_end: Working window end position
Marek Vasut6394ef52015-07-19 06:04:00 +02001840 * @p: DQS Phase Iterator
1841 * @i: Iterator
1842 *
1843 * Find non-working DQS enable phase setting.
1844 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001845static int sdr_nonworking_phase(struct socfpga_sdrseq *seq,
1846 const u32 grp, u32 *work_end, u32 *p, u32 *i)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001847{
Marek Vasutf2b02d42015-07-19 05:26:49 +02001848 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001849
1850 (*p)++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001851 *work_end += seq->iocfg->delay_per_opa_tap;
1852 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasutf2b02d42015-07-19 05:26:49 +02001853 /* Fiddle with FIFO. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001854 *p = 0;
Marek Vasut42e43ab2015-07-19 06:37:51 +02001855 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001856 }
1857
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001858 ret = sdr_find_phase(seq, 0, grp, work_end, i, p);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001859 if (ret) {
1860 /* Cannot see edge of failing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001861 debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n",
Marek Vasutf2b02d42015-07-19 05:26:49 +02001862 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001863 }
1864
Marek Vasutf2b02d42015-07-19 05:26:49 +02001865 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001866}
1867
Marek Vasutfea03c32015-07-19 04:14:32 +02001868/**
1869 * sdr_find_window_center() - Find center of the working DQS window.
1870 * @grp: Read/Write group
1871 * @work_bgn: First working settings
1872 * @work_end: Last working settings
Marek Vasutfea03c32015-07-19 04:14:32 +02001873 *
1874 * Find center of the working DQS enable window.
1875 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001876static int sdr_find_window_center(struct socfpga_sdrseq *seq,
1877 const u32 grp, const u32 work_bgn,
Marek Vasut42e43ab2015-07-19 06:37:51 +02001878 const u32 work_end)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001879{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001880 u32 work_mid;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001881 int tmp_delay = 0;
Marek Vasutd996e802015-07-19 02:56:59 +02001882 int i, p, d;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001883
Marek Vasutd996e802015-07-19 02:56:59 +02001884 work_mid = (work_bgn + work_end) / 2;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001885
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001886 debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
Marek Vasutd996e802015-07-19 02:56:59 +02001887 work_bgn, work_end, work_mid);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001888 /* Get the middle delay to be less than a VFIFO delay */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001889 tmp_delay = (seq->iocfg->dqs_en_phase_max + 1)
1890 * seq->iocfg->delay_per_opa_tap;
Marek Vasutd996e802015-07-19 02:56:59 +02001891
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001892 debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001893 work_mid %= tmp_delay;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001894 debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001895
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001896 tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap);
1897 if (tmp_delay > seq->iocfg->dqs_en_phase_max
1898 * seq->iocfg->delay_per_opa_tap) {
1899 tmp_delay = seq->iocfg->dqs_en_phase_max
1900 * seq->iocfg->delay_per_opa_tap;
1901 }
1902 p = tmp_delay / seq->iocfg->delay_per_opa_tap;
Marek Vasutd996e802015-07-19 02:56:59 +02001903
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001904 debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001905
Marek Vasutc85b9b32015-08-02 19:47:01 +02001906 d = DIV_ROUND_UP(work_mid - tmp_delay,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001907 seq->iocfg->delay_per_dqs_en_dchain_tap);
1908 if (d > seq->iocfg->dqs_en_delay_max)
1909 d = seq->iocfg->dqs_en_delay_max;
1910 tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001911
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001912 debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001913
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001914 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
1915 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001916
1917 /*
1918 * push vfifo until we can successfully calibrate. We can do this
1919 * because the largest possible margin is 1 VFIFO cycle.
1920 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001921 for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001922 debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001923 if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001924 PASS_ONE_BIT,
Marek Vasuta50d5d72015-07-19 07:35:36 +02001925 0)) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001926 debug_cond(DLEVEL >= 2,
Marek Vasut42e43ab2015-07-19 06:37:51 +02001927 "%s:%d center: found: ptap=%u dtap=%u\n",
1928 __func__, __LINE__, p, d);
Marek Vasutfea03c32015-07-19 04:14:32 +02001929 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001930 }
1931
Marek Vasutfea03c32015-07-19 04:14:32 +02001932 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001933 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001934 }
1935
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001936 debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
Marek Vasutfea03c32015-07-19 04:14:32 +02001937 __func__, __LINE__);
1938 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001939}
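
/*
 * Illustrative sketch, hypothetical helper not present in the original
 * code: the decomposition performed by sdr_find_window_center(). The
 * midpoint of the working window is first reduced modulo one full VFIFO
 * cycle, (dqs_en_phase_max + 1) * delay_per_opa_tap, then split into
 * whole phase taps (p) and the remaining delay-chain taps (d). Clamping
 * to the phase/delay maxima is omitted here for brevity.
 */
static inline void example_split_window_mid(u32 work_mid, u32 opa_tap,
					    u32 dchain_tap, u32 *p, u32 *d)
{
	u32 coarse = rounddown(work_mid, opa_tap);

	*p = coarse / opa_tap;		/* whole phase taps */
	*d = DIV_ROUND_UP(work_mid - coarse, dchain_tap); /* leftover dtaps */
}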
1940
Marek Vasutec4bbd32015-07-20 09:11:09 +02001941/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001942 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to
1943 * use
Marek Vasutec4bbd32015-07-20 09:11:09 +02001944 * @grp: Read/Write Group
1945 *
1946 * Find a good DQS enable to use.
1947 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001948static int
1949rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq,
1950 const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001951{
Marek Vasut59729a62015-07-20 09:20:20 +02001952 u32 d, p, i;
1953 u32 dtaps_per_ptap;
1954 u32 work_bgn, work_end;
Marek Vasuteb447cb2015-08-10 23:01:43 +02001955 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
Marek Vasut59729a62015-07-20 09:20:20 +02001956 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001957
1958 debug("%s:%d %u\n", __func__, __LINE__, grp);
1959
1960 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1961
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001962 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
1963 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001964
Marek Vasut4896bcc2015-07-19 02:42:21 +02001965 /* Step 0: Determine number of delay taps for each phase tap. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001966 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1967 seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001968
Marek Vasut4896bcc2015-07-19 02:42:21 +02001969 /* Step 1: First push vfifo until we get a failing read. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001970 find_vfifo_failing_read(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001971
Marek Vasut4896bcc2015-07-19 02:42:21 +02001972 /* Step 2: Find first working phase, increment in ptaps. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001973 work_bgn = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001974 ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i);
Marek Vasut28dbf122015-07-20 09:20:42 +02001975 if (ret)
1976 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001977
1978 work_end = work_bgn;
1979
1980 /*
Marek Vasut4896bcc2015-07-19 02:42:21 +02001981 * If d is 0 then the working window covers a phase tap and we can
1982 * follow the old procedure. Otherwise, we've found the beginning
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001983 * and we need to increment the dtaps until we find the end.
1984 */
1985 if (d == 0) {
Marek Vasut4896bcc2015-07-19 02:42:21 +02001986 /*
1987 * Step 3a: If we have room, back off by one and
1988 * increment in dtaps.
1989 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001990 sdr_backup_phase(seq, grp, &work_bgn, &p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001991
Marek Vasut4896bcc2015-07-19 02:42:21 +02001992 /*
1993 * Step 4a: go forward from working phase to non working
1994 * phase, increment in ptaps.
1995 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001996 ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i);
Marek Vasut28dbf122015-07-20 09:20:42 +02001997 if (ret)
1998 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001999
Marek Vasut4896bcc2015-07-19 02:42:21 +02002000 /* Step 5a: Back off one from last, increment in dtaps. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002001
2002 /* Special case code for backing up a phase */
2003 if (p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002004 p = seq->iocfg->dqs_en_phase_max;
2005 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002006 } else {
2007 p = p - 1;
2008 }
2009
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002010 work_end -= seq->iocfg->delay_per_opa_tap;
2011 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002012
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002013 d = 0;
2014
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002015 debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002016 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002017 }
2018
Marek Vasut4896bcc2015-07-19 02:42:21 +02002019 /* The dtap increment to find the failing edge is done here. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002020 sdr_find_phase_delay(seq, 0, 1, grp, &work_end,
2021 seq->iocfg->delay_per_dqs_en_dchain_tap, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002022
2023 /* Go back to working dtap */
2024 if (d != 0)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002025 work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002026
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002027 debug_cond(DLEVEL >= 2,
Marek Vasut4896bcc2015-07-19 02:42:21 +02002028 "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
2029 __func__, __LINE__, p, d - 1, work_end);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002030
2031 if (work_end < work_bgn) {
2032 /* nil range */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002033 debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002034 __func__, __LINE__);
Marek Vasut28dbf122015-07-20 09:20:42 +02002035 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002036 }
2037
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002038 debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002039 __func__, __LINE__, work_bgn, work_end);
2040
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002041 /*
Marek Vasut4896bcc2015-07-19 02:42:21 +02002042 * We need to calculate the number of dtaps that equal a ptap.
2043 * To do that we'll back up a ptap and re-find the edge of the
2044 * window using dtaps.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002045 */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002046 debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002047 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002048
2049 /* Special case code for backing up a phase */
2050 if (p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002051 p = seq->iocfg->dqs_en_phase_max;
2052 rw_mgr_decr_vfifo(seq, grp);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002053 debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002054 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002055 } else {
2056 p = p - 1;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002057 debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002058 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002059 }
2060
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002061 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002062
2063 /*
2064 * Increase dtap until we first see a passing read (in case the
Marek Vasut4896bcc2015-07-19 02:42:21 +02002065 * window is smaller than a ptap), and then a failing read to
2066 * mark the edge of the window again.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002067 */
2068
Marek Vasut4896bcc2015-07-19 02:42:21 +02002069 /* Find a passing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002070 debug_cond(DLEVEL >= 2, "%s:%d find passing read\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002071 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002072
Marek Vasut6ff36b72015-07-19 07:27:06 +02002073 initial_failing_dtap = d;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002074
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002075 found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002076 if (found_passing_read) {
Marek Vasut4896bcc2015-07-19 02:42:21 +02002077 /* Find a failing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002078 debug_cond(DLEVEL >= 2, "%s:%d find failing read\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002079 __func__, __LINE__);
Marek Vasut6ff36b72015-07-19 07:27:06 +02002080 d++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002081 found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL,
2082 0, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002083 } else {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002084 debug_cond(DLEVEL >= 1,
Marek Vasut4896bcc2015-07-19 02:42:21 +02002085 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
2086 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002087 }
2088
2089 /*
2090 * The dynamically calculated dtaps_per_ptap is only valid if we
2091 * found a passing/failing read. If we didn't, it means d hit the max
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002092 * (seq->iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002093 * statically calculated value.
2094 */
2095 if (found_passing_read && found_failing_read)
2096 dtaps_per_ptap = d - initial_failing_dtap;
2097
Marek Vasutb5450962015-07-12 21:05:08 +02002098 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002099 debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002100 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002101
Marek Vasut4896bcc2015-07-19 02:42:21 +02002102 /* Step 6: Find the centre of the window. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002103 ret = sdr_find_window_center(seq, grp, work_bgn, work_end);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002104
Marek Vasut28dbf122015-07-20 09:20:42 +02002105 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002106}
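
/*
 * Worked example with illustrative figures only: if the failing edge
 * before the ptap backup sat at initial_failing_dtap = 3, and after
 * backing up one ptap the next passing read appears at dtap 5 with the
 * following failing read at dtap 13, then dtaps_per_ptap = 13 - 3 = 10,
 * i.e. ten delay-chain taps span one phase tap for the tracking logic.
 */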
2107
Marek Vasutb20a5062015-07-13 02:11:02 +02002108/**
Marek Vasut85cd4d72015-07-13 02:48:34 +02002109 * search_stop_check() - Check if the detected edge is valid
2110 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2111 * @d: DQS delay
2112 * @rank_bgn: Rank number
2113 * @write_group: Write Group
2114 * @read_group: Read Group
2115 * @bit_chk: Resulting bit mask after the test
2116 * @sticky_bit_chk: Resulting sticky bit mask after the test
2117 * @use_read_test: Perform read test
2118 *
2119 * Test if the found edge is valid.
2120 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002121static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write,
2122 const int d, const int rank_bgn,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002123 const u32 write_group, const u32 read_group,
2124 u32 *bit_chk, u32 *sticky_bit_chk,
2125 const u32 use_read_test)
2126{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002127 const u32 ratio = seq->rwcfg->mem_if_read_dqs_width /
2128 seq->rwcfg->mem_if_write_dqs_width;
2129 const u32 correct_mask = write ? seq->param.write_correct_mask :
2130 seq->param.read_correct_mask;
2131 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2132 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut85cd4d72015-07-13 02:48:34 +02002133 u32 ret;
2134 /*
2135 * Stop searching when the read test doesn't pass AND when
2136 * we've seen a passing read on every bit.
2137 */
2138 if (write) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002139 ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn,
2140 write_group, 0,
2141 PASS_ONE_BIT, bit_chk,
2142 0);
Marek Vasut85cd4d72015-07-13 02:48:34 +02002143 } else if (use_read_test) { /* READ-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002144 ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002145 NUM_READ_PB_TESTS,
2146 PASS_ONE_BIT, bit_chk,
2147 0, 0);
2148 } else { /* READ-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002149 rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002150 PASS_ONE_BIT, bit_chk, 0);
2151 *bit_chk = *bit_chk >> (per_dqs *
2152 (read_group - (write_group * ratio)));
2153 ret = (*bit_chk == 0);
2154 }
2155 *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2156 ret = ret && (*sticky_bit_chk == correct_mask);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002157 debug_cond(DLEVEL >= 2,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002158 "%s:%d center(left): dtap=%u => %u == %u && %u\n",
2159 __func__, __LINE__, d,
2160 *sticky_bit_chk, correct_mask, ret);
2161 return ret;
2162}
2163
2164/**
Marek Vasute624caf2015-07-13 02:38:15 +02002165 * search_left_edge() - Find left edge of DQ/DQS working phase
2166 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2167 * @rank_bgn: Rank number
2168 * @write_group: Write Group
2169 * @read_group: Read Group
2170 * @test_bgn: Rank number to begin the test
Marek Vasute624caf2015-07-13 02:38:15 +02002171 * @sticky_bit_chk: Resulting sticky bit mask after the test
2172 * @left_edge: Left edge of the DQ/DQS phase
2173 * @right_edge: Right edge of the DQ/DQS phase
2174 * @use_read_test: Perform read test
2175 *
2176 * Find left edge of DQ/DQS working phase.
2177 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002178static void search_left_edge(struct socfpga_sdrseq *seq, const int write,
2179 const int rank_bgn, const u32 write_group,
2180 const u32 read_group, const u32 test_bgn,
2181 u32 *sticky_bit_chk, int *left_edge,
2182 int *right_edge, const u32 use_read_test)
Marek Vasute624caf2015-07-13 02:38:15 +02002183{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002184 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2185 seq->iocfg->io_in_delay_max;
2186 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2187 seq->iocfg->dqs_in_delay_max;
2188 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2189 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutb69c2472015-07-18 20:34:00 +02002190 u32 stop, bit_chk;
Marek Vasute624caf2015-07-13 02:38:15 +02002191 int i, d;
2192
2193 for (d = 0; d <= dqs_max; d++) {
2194 if (write)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002195 scc_mgr_apply_group_dq_out1_delay(seq, d);
Marek Vasute624caf2015-07-13 02:38:15 +02002196 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002197 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d);
Marek Vasute624caf2015-07-13 02:38:15 +02002198
2199 writel(0, &sdr_scc_mgr->update);
2200
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002201 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasutb69c2472015-07-18 20:34:00 +02002202 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002203 use_read_test);
Marek Vasute624caf2015-07-13 02:38:15 +02002204 if (stop == 1)
2205 break;
2206
2207 /* stop != 1 */
2208 for (i = 0; i < per_dqs; i++) {
Marek Vasutb69c2472015-07-18 20:34:00 +02002209 if (bit_chk & 1) {
Marek Vasute624caf2015-07-13 02:38:15 +02002210 /*
2211 * Remember a passing test as
2212 * the left_edge.
2213 */
2214 left_edge[i] = d;
2215 } else {
2216 /*
2217 * If a left edge has not been seen
2218 * yet, then a future passing test
2219 * will mark this edge as the right
2220 * edge.
2221 */
2222 if (left_edge[i] == delay_max + 1)
2223 right_edge[i] = -(d + 1);
2224 }
Marek Vasutb69c2472015-07-18 20:34:00 +02002225 bit_chk >>= 1;
Marek Vasute624caf2015-07-13 02:38:15 +02002226 }
2227 }
2228
2229 /* Reset DQ delay chains to 0 */
2230 if (write)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002231 scc_mgr_apply_group_dq_out1_delay(seq, 0);
Marek Vasute624caf2015-07-13 02:38:15 +02002232 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002233 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasute624caf2015-07-13 02:38:15 +02002234
2235 *sticky_bit_chk = 0;
2236 for (i = per_dqs - 1; i >= 0; i--) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002237 debug_cond(DLEVEL >= 2,
Marek Vasute624caf2015-07-13 02:38:15 +02002238 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2239 __func__, __LINE__, i, left_edge[i],
2240 i, right_edge[i]);
2241
2242 /*
2243 * Check for cases where we haven't found the left edge,
2244 * which makes our assignment of the right edge invalid.
2245 * Reset it to the illegal value.
2246 */
2247 if ((left_edge[i] == delay_max + 1) &&
2248 (right_edge[i] != delay_max + 1)) {
2249 right_edge[i] = delay_max + 1;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002250 debug_cond(DLEVEL >= 2,
Marek Vasute624caf2015-07-13 02:38:15 +02002251 "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2252 __func__, __LINE__, i, right_edge[i]);
2253 }
2254
2255 /*
2256 * Reset sticky bit
2257 * READ: except for bits where we have seen both
2258 * the left and right edge.
2259 * WRITE: except for bits where we have seen the
2260 * left edge.
2261 */
2262 *sticky_bit_chk <<= 1;
2263 if (write) {
2264 if (left_edge[i] != delay_max + 1)
2265 *sticky_bit_chk |= 1;
2266 } else {
2267 if ((left_edge[i] != delay_max + 1) &&
2268 (right_edge[i] != delay_max + 1))
2269 *sticky_bit_chk |= 1;
2270 }
2271 }
Marek Vasute624caf2015-07-13 02:38:15 +02002272}
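
/*
 * Worked example (illustrative) of the per-bit bookkeeping above.
 * Suppose delay_max = 31, so 32 is the "no edge seen" marker, and
 * bit 0 fails at d = 0..2 and first passes at d = 3:
 *
 *	d = 0..2: left_edge[0] == 32, so right_edge[0] = -(d + 1) -> -3
 *	d = 3:    bit 0 passes, left_edge[0] = 3
 *
 * The negative right_edge[0] records where the bit was still failing;
 * bits that never find a left edge have such provisional right edges
 * reset back to the illegal value 32 at the end of the function.
 */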
2273
2274/**
Marek Vasutb20a5062015-07-13 02:11:02 +02002275 * search_right_edge() - Find right edge of DQ/DQS working phase
2276 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2277 * @rank_bgn: Rank number
2278 * @write_group: Write Group
2279 * @read_group: Read Group
2280 * @start_dqs: DQS start phase
2281 * @start_dqs_en: DQS enable start phase
Marek Vasutb20a5062015-07-13 02:11:02 +02002282 * @sticky_bit_chk: Resulting sticky bit mask after the test
2283 * @left_edge: Left edge of the DQ/DQS phase
2284 * @right_edge: Right edge of the DQ/DQS phase
2285 * @use_read_test: Perform read test
2286 *
2287 * Find right edge of DQ/DQS working phase.
2288 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002289static int search_right_edge(struct socfpga_sdrseq *seq, const int write,
2290 const int rank_bgn, const u32 write_group,
2291 const u32 read_group, const int start_dqs,
2292 const int start_dqs_en, u32 *sticky_bit_chk,
2293 int *left_edge, int *right_edge,
2294 const u32 use_read_test)
Marek Vasutb20a5062015-07-13 02:11:02 +02002295{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002296 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2297 seq->iocfg->io_in_delay_max;
2298 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2299 seq->iocfg->dqs_in_delay_max;
2300 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2301 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutb69c2472015-07-18 20:34:00 +02002302 u32 stop, bit_chk;
Marek Vasutb20a5062015-07-13 02:11:02 +02002303 int i, d;
2304
2305 for (d = 0; d <= dqs_max - start_dqs; d++) {
2306 if (write) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002307 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
2308 write_group,
Marek Vasutb20a5062015-07-13 02:11:02 +02002309 d + start_dqs);
2310 } else { /* READ-ONLY */
2311 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002312 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasut8af9ca02015-08-02 19:42:26 +02002313 u32 delay = d + start_dqs_en;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002314 if (delay > seq->iocfg->dqs_en_delay_max)
2315 delay = seq->iocfg->dqs_en_delay_max;
Marek Vasutb20a5062015-07-13 02:11:02 +02002316 scc_mgr_set_dqs_en_delay(read_group, delay);
2317 }
2318 scc_mgr_load_dqs(read_group);
2319 }
2320
2321 writel(0, &sdr_scc_mgr->update);
2322
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002323 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasutb69c2472015-07-18 20:34:00 +02002324 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002325 use_read_test);
Marek Vasutb20a5062015-07-13 02:11:02 +02002326 if (stop == 1) {
2327 if (write && (d == 0)) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002328 for (i = 0;
2329 i < seq->rwcfg->mem_dq_per_write_dqs;
Marek Vasutc85b9b32015-08-02 19:47:01 +02002330 i++) {
Marek Vasutb20a5062015-07-13 02:11:02 +02002331 /*
2332 * d = 0 failed, but it passed when
2333 * testing the left edge, so it must be
2334 * marginal, set it to -1
2335 */
2336 if (right_edge[i] == delay_max + 1 &&
2337 left_edge[i] != delay_max + 1)
2338 right_edge[i] = -1;
2339 }
2340 }
2341 break;
2342 }
2343
2344 /* stop != 1 */
2345 for (i = 0; i < per_dqs; i++) {
Marek Vasutb69c2472015-07-18 20:34:00 +02002346 if (bit_chk & 1) {
Marek Vasutb20a5062015-07-13 02:11:02 +02002347 /*
2348 * Remember a passing test as
2349 * the right_edge.
2350 */
2351 right_edge[i] = d;
2352 } else {
2353 if (d != 0) {
2354 /*
2355 * If a right edge has not
2356 * been seen yet, then a future
2357 * passing test will mark this
2358 * edge as the left edge.
2359 */
2360 if (right_edge[i] == delay_max + 1)
2361 left_edge[i] = -(d + 1);
2362 } else {
2363 /*
2364 * d = 0 failed, but it passed
2365 * when testing the left edge,
2366 * so it must be marginal, set
2367 * it to -1
2368 */
2369 if (right_edge[i] == delay_max + 1 &&
2370 left_edge[i] != delay_max + 1)
2371 right_edge[i] = -1;
2372 /*
2373 * If a right edge has not been
2374 * seen yet, then a future
2375 * passing test will mark this
2376 * edge as the left edge.
2377 */
2378 else if (right_edge[i] == delay_max + 1)
2379 left_edge[i] = -(d + 1);
2380 }
2381 }
2382
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002383 debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ",
Marek Vasutb20a5062015-07-13 02:11:02 +02002384 __func__, __LINE__, d);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002385 debug_cond(DLEVEL >= 2,
Marek Vasutb20a5062015-07-13 02:11:02 +02002386 "bit_chk_test=%i left_edge[%u]: %d ",
Marek Vasutb69c2472015-07-18 20:34:00 +02002387 bit_chk & 1, i, left_edge[i]);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002388 debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i,
Marek Vasutb20a5062015-07-13 02:11:02 +02002389 right_edge[i]);
Marek Vasutb69c2472015-07-18 20:34:00 +02002390 bit_chk >>= 1;
Marek Vasutb20a5062015-07-13 02:11:02 +02002391 }
2392 }
2393
2394 /* Check that all bits have a window */
2395 for (i = 0; i < per_dqs; i++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002396 debug_cond(DLEVEL >= 2,
Marek Vasutb20a5062015-07-13 02:11:02 +02002397 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2398 __func__, __LINE__, i, left_edge[i],
2399 i, right_edge[i]);
2400 if ((left_edge[i] == dqs_max + 1) ||
2401 (right_edge[i] == dqs_max + 1))
2402 return i + 1; /* FIXME: If we fail, retval > 0 */
2403 }
2404
2405 return 0;
2406}
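
/*
 * Illustrative note on the return convention above: search_right_edge()
 * returns 0 on success and (failing bit + 1) otherwise. If, say, bit 2
 * of a group never sees both edges, the return value is 3, which a
 * caller such as rw_mgr_mem_calibrate_writes_center() maps back to the
 * failing DQ via test_bgn + ret - 1.
 */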
2407
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002408/**
2409 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2410 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2411 * @left_edge: Left edge of the DQ/DQS phase
2412 * @right_edge: Right edge of the DQ/DQS phase
2413 * @mid_min: Best DQ/DQS phase middle setting
2414 *
2415 * Find index and value of the middle of the DQ/DQS working phase.
2416 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002417static int get_window_mid_index(struct socfpga_sdrseq *seq,
2418 const int write, int *left_edge,
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002419 int *right_edge, int *mid_min)
2420{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002421 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2422 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002423 int i, mid, min_index;
2424
2425 /* Find middle of window for each DQ bit */
2426 *mid_min = left_edge[0] - right_edge[0];
2427 min_index = 0;
2428 for (i = 1; i < per_dqs; i++) {
2429 mid = left_edge[i] - right_edge[i];
2430 if (mid < *mid_min) {
2431 *mid_min = mid;
2432 min_index = i;
2433 }
2434 }
2435
2436 /*
2437 * -mid_min/2 represents the amount that we need to move DQS.
2438 * If mid_min is odd and positive we'll need to add one to make
2439 * sure the rounding in further calculations is correct (always
2440 * bias to the right), so just add 1 for all positive values.
2441 */
2442 if (*mid_min > 0)
2443 (*mid_min)++;
2444 *mid_min = *mid_min / 2;
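	/*
	 * Worked example (illustrative): if the smallest left - right
	 * difference in the group is 5, mid_min becomes 6 after the
	 * increment and 3 after the division, instead of the truncated
	 * 2; the resulting DQS move is biased toward the right edge.
	 */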
2445
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002446 debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002447 __func__, __LINE__, *mid_min, min_index);
2448 return min_index;
2449}
2450
Marek Vasut89feb502015-07-18 19:46:26 +02002451/**
2452 * center_dq_windows() - Center the DQ/DQS windows
2453 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2454 * @left_edge: Left edge of the DQ/DQS phase
2455 * @right_edge: Right edge of the DQ/DQS phase
2456 * @mid_min: Adjusted DQ/DQS phase middle setting
2457 * @orig_mid_min: Original DQ/DQS phase middle setting
2458 * @min_index: DQ/DQS phase middle setting index
2459 * @test_bgn: Rank number to begin the test
2460 * @dq_margin: Amount of shift for the DQ
2461 * @dqs_margin: Amount of shift for the DQS
2462 *
2463 * Align the DQ/DQS windows in each group.
2464 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002465static void center_dq_windows(struct socfpga_sdrseq *seq,
2466 const int write, int *left_edge, int *right_edge,
Marek Vasut89feb502015-07-18 19:46:26 +02002467 const int mid_min, const int orig_mid_min,
2468 const int min_index, const int test_bgn,
2469 int *dq_margin, int *dqs_margin)
2470{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002471 const s32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2472 seq->iocfg->io_in_delay_max;
2473 const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2474 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut66acabc2016-04-05 23:17:35 +02002475 const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
Marek Vasut89feb502015-07-18 19:46:26 +02002476 SCC_MGR_IO_IN_DELAY_OFFSET;
Marek Vasut66acabc2016-04-05 23:17:35 +02002477 const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
Marek Vasut89feb502015-07-18 19:46:26 +02002478
Marek Vasut66acabc2016-04-05 23:17:35 +02002479 s32 temp_dq_io_delay1;
Marek Vasut89feb502015-07-18 19:46:26 +02002480 int shift_dq, i, p;
2481
2482 /* Initialize data for export structures */
2483 *dqs_margin = delay_max + 1;
2484 *dq_margin = delay_max + 1;
2485
2486 /* add delay to bring centre of all DQ windows to the same "level" */
2487 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2488 /* Use values before divide by 2 to reduce round off error */
2489 shift_dq = (left_edge[i] - right_edge[i] -
2490 (left_edge[min_index] - right_edge[min_index]))/2 +
2491 (orig_mid_min - mid_min);
2492
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002493 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002494 "vfifo_center: before: shift_dq[%u]=%d\n",
2495 i, shift_dq);
2496
Marek Vasut66acabc2016-04-05 23:17:35 +02002497 temp_dq_io_delay1 = readl(addr + (i << 2));
Marek Vasut89feb502015-07-18 19:46:26 +02002498
2499 if (shift_dq + temp_dq_io_delay1 > delay_max)
Marek Vasut66acabc2016-04-05 23:17:35 +02002500 shift_dq = delay_max - temp_dq_io_delay1;
Marek Vasut89feb502015-07-18 19:46:26 +02002501 else if (shift_dq + temp_dq_io_delay1 < 0)
2502 shift_dq = -temp_dq_io_delay1;
2503
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002504 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002505 "vfifo_center: after: shift_dq[%u]=%d\n",
2506 i, shift_dq);
2507
2508 if (write)
Marek Vasutc85b9b32015-08-02 19:47:01 +02002509 scc_mgr_set_dq_out1_delay(i,
2510 temp_dq_io_delay1 + shift_dq);
Marek Vasut89feb502015-07-18 19:46:26 +02002511 else
Marek Vasutc85b9b32015-08-02 19:47:01 +02002512 scc_mgr_set_dq_in_delay(p,
2513 temp_dq_io_delay1 + shift_dq);
Marek Vasut89feb502015-07-18 19:46:26 +02002514
2515 scc_mgr_load_dq(p);
2516
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002517 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002518 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2519 left_edge[i] - shift_dq + (-mid_min),
2520 right_edge[i] + shift_dq - (-mid_min));
2521
2522 /* To determine values for export structures */
2523 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2524 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2525
2526 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2527 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2528 }
Marek Vasut89feb502015-07-18 19:46:26 +02002529}
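
/*
 * Worked example (illustrative) for the shift_dq computation above: if
 * bit i has a window [left, right] = [10, 4] and the narrowest bit
 * (min_index) has [6, 4], with orig_mid_min == mid_min, then
 *
 *	shift_dq = ((10 - 4) - (6 - 4)) / 2 = 2
 *
 * i.e. bit i's centre sits two taps from the reference centre, so its
 * DQ delay is nudged by 2, clamped so the programmed delay stays
 * within [0, delay_max].
 */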
2530
Marek Vasut9cdbb962015-07-21 04:27:32 +02002531/**
2532 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2533 * @rank_bgn: Rank number
2534 * @rw_group: Read/Write Group
2535 * @test_bgn: Rank at which the test begins
2536 * @use_read_test: Perform a read test
2537 * @update_fom: Update FOM
2538 *
2539 * Per-bit deskew DQ and centering.
2540 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002541static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq,
2542 const u32 rank_bgn,
2543 const u32 rw_group,
2544 const u32 test_bgn,
2545 const int use_read_test,
2546 const int update_fom)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002547{
Marek Vasutf1b8f712015-07-18 19:57:12 +02002548 const u32 addr =
2549 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
Marek Vasutdfed1e62015-07-18 20:42:27 +02002550 (rw_group << 2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002551 /*
2552 * Store these as signed since there are comparisons with
2553 * signed numbers.
2554 */
Marek Vasut8af9ca02015-08-02 19:42:26 +02002555 u32 sticky_bit_chk;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002556 s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs];
2557 s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs];
2558 s32 orig_mid_min, mid_min;
2559 s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
2560 s32 dq_margin, dqs_margin;
Marek Vasutf1b8f712015-07-18 19:57:12 +02002561 int i, min_index;
Marek Vasutb20a5062015-07-13 02:11:02 +02002562 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002563
Marek Vasutdfed1e62015-07-18 20:42:27 +02002564 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002565
Marek Vasutf1b8f712015-07-18 19:57:12 +02002566 start_dqs = readl(addr);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002567 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
2568 start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002569
2570 /* set the left and right edge of each bit to an illegal value */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002571 /* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002572 sticky_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002573 for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) {
2574 left_edge[i] = seq->iocfg->io_in_delay_max + 1;
2575 right_edge[i] = seq->iocfg->io_in_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002576 }
2577
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002578 /* Search for the left edge of the window for each bit */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002579 search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn,
Marek Vasutb69c2472015-07-18 20:34:00 +02002580 &sticky_bit_chk,
Marek Vasute624caf2015-07-13 02:38:15 +02002581 left_edge, right_edge, use_read_test);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002582
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002584 /* Search for the right edge of the window for each bit */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002585 ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group,
Marek Vasutb20a5062015-07-13 02:11:02 +02002586 start_dqs, start_dqs_en,
Marek Vasutb69c2472015-07-18 20:34:00 +02002587 &sticky_bit_chk,
Marek Vasutb20a5062015-07-13 02:11:02 +02002588 left_edge, right_edge, use_read_test);
2589 if (ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002590 /*
Marek Vasutb20a5062015-07-13 02:11:02 +02002591 * Restore delay chain settings before letting the loop
2592 * in rw_mgr_mem_calibrate_vfifo to retry different
2593 * dqs/ck relationships.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002594 */
Marek Vasutdfed1e62015-07-18 20:42:27 +02002595 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002596 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
Marek Vasutdfed1e62015-07-18 20:42:27 +02002597 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002598
Marek Vasutdfed1e62015-07-18 20:42:27 +02002599 scc_mgr_load_dqs(rw_group);
Marek Vasutb20a5062015-07-13 02:11:02 +02002600 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002601
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002602 debug_cond(DLEVEL >= 1,
Marek Vasutb20a5062015-07-13 02:11:02 +02002603 "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2604 __func__, __LINE__, i, left_edge[i], right_edge[i]);
2605 if (use_read_test) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002606 set_failing_group_stage(seq, rw_group *
2607 seq->rwcfg->mem_dq_per_read_dqs + i,
Marek Vasutb20a5062015-07-13 02:11:02 +02002608 CAL_STAGE_VFIFO,
2609 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002610 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002611 set_failing_group_stage(seq, rw_group *
2612 seq->rwcfg->mem_dq_per_read_dqs + i,
Marek Vasutb20a5062015-07-13 02:11:02 +02002613 CAL_STAGE_VFIFO_AFTER_WRITES,
2614 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002615 }
Marek Vasutd29f8042015-07-18 20:44:28 +02002616 return -EIO;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002617 }
2618
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002619 min_index = get_window_mid_index(seq, 0, left_edge, right_edge,
2620 &mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002621
2622 /* Determine the amount we can change DQS (which is -mid_min) */
2623 orig_mid_min = mid_min;
2624 new_dqs = start_dqs - mid_min;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002625 if (new_dqs > seq->iocfg->dqs_in_delay_max)
2626 new_dqs = seq->iocfg->dqs_in_delay_max;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002627 else if (new_dqs < 0)
2628 new_dqs = 0;
2629
2630 mid_min = start_dqs - new_dqs;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002631 debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002632 mid_min, new_dqs);
2633
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002634 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
2635 if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max)
Marek Vasutc85b9b32015-08-02 19:47:01 +02002636 mid_min += start_dqs_en - mid_min -
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002637 seq->iocfg->dqs_en_delay_max;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002638 else if (start_dqs_en - mid_min < 0)
2639 mid_min += start_dqs_en - mid_min;
2640 }
2641 new_dqs = start_dqs - mid_min;
2642
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002643 debug_cond(DLEVEL >= 1,
Marek Vasutca8ea372015-07-18 08:01:45 +02002644 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2645 start_dqs,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002646 seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002647 new_dqs, mid_min);
2648
Marek Vasut89feb502015-07-18 19:46:26 +02002649 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002650 center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasut89feb502015-07-18 19:46:26 +02002651 min_index, test_bgn, &dq_margin, &dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002652
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002653 /* Move DQS-en */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002654 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasutf1b8f712015-07-18 19:57:12 +02002655 final_dqs_en = start_dqs_en - mid_min;
Marek Vasutdfed1e62015-07-18 20:42:27 +02002656 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2657 scc_mgr_load_dqs(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002658 }
2659
2660 /* Move DQS */
Marek Vasutdfed1e62015-07-18 20:42:27 +02002661 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2662 scc_mgr_load_dqs(rw_group);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002663 debug_cond(DLEVEL >= 2,
Marek Vasutca8ea372015-07-18 08:01:45 +02002664 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2665 __func__, __LINE__, dq_margin, dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002666
2667 /*
2668 * Do not remove this line as it makes sure all of our decisions
2669 * have been applied. Apply the update bit.
2670 */
Marek Vasutb5450962015-07-12 21:05:08 +02002671 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002672
Marek Vasutd29f8042015-07-18 20:44:28 +02002673 if ((dq_margin < 0) || (dqs_margin < 0))
2674 return -EINVAL;
2675
2676 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002677}
2678
Marek Vasutc27ea622015-07-17 03:16:45 +02002679/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002680 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the
2681 * device
Marek Vasut6ca5b962015-07-18 02:46:56 +02002682 * @rw_group: Read/Write Group
2683 * @phase: DQ/DQS phase
2684 *
2685 * Because initially no communication can be reliably performed with the memory
2686 * device, the sequencer uses a guaranteed write mechanism to write data into
2687 * the memory device.
2688 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002689static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq,
2690 const u32 rw_group,
Marek Vasut6ca5b962015-07-18 02:46:56 +02002691 const u32 phase)
2692{
Marek Vasut6ca5b962015-07-18 02:46:56 +02002693 int ret;
2694
2695 /* Set a particular DQ/DQS phase. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002696 scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002697
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002698 debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
Marek Vasut6ca5b962015-07-18 02:46:56 +02002699 __func__, __LINE__, rw_group, phase);
2700
2701 /*
2702 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2703 * Load up the patterns used by read calibration using the
2704 * current DQDQS phase.
2705 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002706 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002707
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002708 if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
Marek Vasut6ca5b962015-07-18 02:46:56 +02002709 return 0;
2710
2711 /*
2712 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2713 * Back-to-Back reads of the patterns used for calibration.
2714 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002715 ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1);
Marek Vasut55c4d692015-07-18 03:55:07 +02002716 if (ret)
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002717 debug_cond(DLEVEL >= 1,
Marek Vasut6ca5b962015-07-18 02:46:56 +02002718 "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2719 __func__, __LINE__, rw_group, phase);
Marek Vasut55c4d692015-07-18 03:55:07 +02002720 return ret;
Marek Vasut6ca5b962015-07-18 02:46:56 +02002721}
2722
2723/**
Marek Vasutfeb5e652015-07-18 02:57:32 +02002724 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2725 * @rw_group: Read/Write Group
2726 * @test_bgn: Rank at which the test begins
2727 *
2728 * DQS enable calibration ensures reliable capture of the DQ signal without
2729 * glitches on the DQS line.
2730 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002731static int
2732rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq,
2733 const u32 rw_group,
2734 const u32 test_bgn)
Marek Vasutfeb5e652015-07-18 02:57:32 +02002735{
Marek Vasutfeb5e652015-07-18 02:57:32 +02002736 /*
2737 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2738 * DQS and DQS Enable Signal Relationships.
2739 */
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002740
2741 /* We start at zero, so we have one less dq to divide among */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002742 const u32 delay_step = seq->iocfg->io_in_delay_max /
2743 (seq->rwcfg->mem_dq_per_read_dqs - 1);
Marek Vasut28dbf122015-07-20 09:20:42 +02002744 int ret;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002745 u32 i, p, d, r;
2746
2747 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2748
2749 /* Try different dq_in_delays since the DQ path is shorter than DQS. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002750 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002751 r += NUM_RANKS_PER_SHADOW_REG) {
2752 for (i = 0, p = test_bgn, d = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002753 i < seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002754 i++, p++, d += delay_step) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002755 debug_cond(DLEVEL >= 1,
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002756 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2757 __func__, __LINE__, rw_group, r, i, p, d);
2758
2759 scc_mgr_set_dq_in_delay(p, d);
2760 scc_mgr_load_dq(p);
2761 }
2762
2763 writel(0, &sdr_scc_mgr->update);
2764 }
2765
2766 /*
2767 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2768 * dq_in_delay values
2769 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002770 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002771
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002772 debug_cond(DLEVEL >= 1,
Vagrant Cascadiana321d042021-12-21 13:07:01 -08002773 "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
Marek Vasut28dbf122015-07-20 09:20:42 +02002774 __func__, __LINE__, rw_group, !ret);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002775
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002776 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002777 r += NUM_RANKS_PER_SHADOW_REG) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002778 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002779 writel(0, &sdr_scc_mgr->update);
2780 }
2781
Marek Vasut28dbf122015-07-20 09:20:42 +02002782 return ret;
Marek Vasutfeb5e652015-07-18 02:57:32 +02002783}
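
/*
 * Worked example (illustrative) for the delay_step spread above: with
 * a hypothetical io_in_delay_max of 31 and 8 DQ per read DQS, the step
 * is 31 / 7 = 4, so the DQ pins get input delays 0, 4, 8, ..., 28 and
 * the DQS-enable phase search sees several DQ/DQS alignments at once.
 */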
2784
2785/**
Marek Vasut349ea3e2015-07-18 03:10:31 +02002786 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2787 * @rw_group: Read/Write Group
2788 * @test_bgn: Rank at which the test begins
2789 * @use_read_test: Perform a read test
2790 * @update_fom: Update FOM
2791 *
2792 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2793 * within a group.
2794 */
2795static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002796rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq,
2797 const u32 rw_group, const u32 test_bgn,
Marek Vasut349ea3e2015-07-18 03:10:31 +02002798 const int use_read_test,
2799 const int update_fom)
2801{
2802 int ret, grp_calibrated;
2803 u32 rank_bgn, sr;
2804
2805 /*
2806 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2807 * Read per-bit deskew can be done on a per shadow register basis.
2808 */
2809 grp_calibrated = 1;
2810 for (rank_bgn = 0, sr = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002811 rank_bgn < seq->rwcfg->mem_number_of_ranks;
Marek Vasut349ea3e2015-07-18 03:10:31 +02002812 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002813 ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group,
Marek Vasutdfed1e62015-07-18 20:42:27 +02002814 test_bgn,
Marek Vasut349ea3e2015-07-18 03:10:31 +02002815 use_read_test,
2816 update_fom);
Marek Vasutd29f8042015-07-18 20:44:28 +02002817 if (!ret)
Marek Vasut349ea3e2015-07-18 03:10:31 +02002818 continue;
2819
2820 grp_calibrated = 0;
2821 }
2822
2823 if (!grp_calibrated)
2824 return -EIO;
2825
2826 return 0;
2827}
2828
2829/**
Marek Vasutc27ea622015-07-17 03:16:45 +02002830 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2831 * @rw_group: Read/Write Group
2832 * @test_bgn: Rank at which the test begins
2833 *
2834 * Stage 1: Calibrate the read valid prediction FIFO.
2835 *
2836 * This function implements UniPHY calibration Stage 1, as explained in
2837 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002838 *
Marek Vasutc27ea622015-07-17 03:16:45 +02002839 * - read valid prediction will consist of finding:
2840 * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2841 * - DQS input phase and DQS input delay (DQ/DQS Centering)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002842 * - we also do a per-bit deskew on the DQ lines.
2843 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002844static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq,
2845 const u32 rw_group, const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002846{
Marek Vasut8af9ca02015-08-02 19:42:26 +02002847 u32 p, d;
2848 u32 dtaps_per_ptap;
2849 u32 failed_substage;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002850
Marek Vasut6ca5b962015-07-18 02:46:56 +02002851 int ret;
2852
Marek Vasute42fcea2015-07-17 04:24:18 +02002853 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002854
Marek Vasut912d43e2015-07-18 03:15:34 +02002855 /* Update info for sims */
2856 reg_file_set_group(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002857 reg_file_set_stage(CAL_STAGE_VFIFO);
Marek Vasut912d43e2015-07-18 03:15:34 +02002858 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002859
Marek Vasut912d43e2015-07-18 03:15:34 +02002860 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2861
2862 /* Determine the number of delay taps for each phase tap. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002863 dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
2864 seq->iocfg->delay_per_dqs_en_dchain_tap)
2865 - 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002866
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002867 for (d = 0; d <= dtaps_per_ptap; d += 2) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002868 /*
2869 * In RLDRAMX we may be messing the delay of pins in
Marek Vasute42fcea2015-07-17 04:24:18 +02002870 * the same write rw_group but outside of the current read
2871 * the rw_group, but that's ok because we haven't calibrated
Marek Vasutd7f49152015-07-17 03:44:26 +02002872 * output side yet.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002873 */
2874 if (d > 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002875 scc_mgr_apply_group_all_out_delay_add_all_ranks(seq,
2876 rw_group,
2877 d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002878 }
2879
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002880 for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) {
Marek Vasut6ca5b962015-07-18 02:46:56 +02002881 /* 1) Guaranteed Write */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002882 ret = rw_mgr_mem_calibrate_guaranteed_write(seq,
2883 rw_group,
2884 p);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002885 if (ret)
2886 break;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002887
Marek Vasutfeb5e652015-07-18 02:57:32 +02002888 /* 2) DQS Enable Calibration */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002889 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq,
2890 rw_group,
Marek Vasutfeb5e652015-07-18 02:57:32 +02002891 test_bgn);
2892 if (ret) {
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002893 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2894 continue;
2895 }
2896
Marek Vasut349ea3e2015-07-18 03:10:31 +02002897 /* 3) Centering DQ/DQS */
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002898 /*
Marek Vasut349ea3e2015-07-18 03:10:31 +02002899 * If doing read after write calibration, do not update
2900 * FOM now. Do it then.
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002901 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002902 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq,
2903 rw_group,
2904 test_bgn,
2905 1, 0);
Marek Vasut349ea3e2015-07-18 03:10:31 +02002906 if (ret) {
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002907 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
Marek Vasut349ea3e2015-07-18 03:10:31 +02002908 continue;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002909 }
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002910
Marek Vasut349ea3e2015-07-18 03:10:31 +02002911 /* All done. */
2912 goto cal_done_ok;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002913 }
2914 }
2915
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002916 /* Calibration Stage 1 failed. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002917 set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO,
2918 failed_substage);
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002919 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002920
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002921 /* Calibration Stage 1 completed OK. */
2922cal_done_ok:
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002923 /*
2924 * Reset the delay chains back to zero if they have moved > 1
2925 * (check for > 1 because loop will increase d even when pass in
2926 * first case).
2927 */
2928 if (d > 2)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002929 scc_mgr_zero_group(seq, rw_group, 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002930
2931 return 1;
2932}
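
/*
 * Stage 1 control flow in brief (illustrative pseudo-code of the
 * function above):
 *
 *	for (d = 0; d <= dtaps_per_ptap; d += 2)
 *		for (p = 0; p <= dqdqs_out_phase_max; p++)
 *			guaranteed write -> DQS enable cal -> DQ/DQS centering
 *
 * The first (d, p) pair for which all three steps pass wins; extra
 * output delay d is only added when no phase p works without it.
 */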
2933
Marek Vasut2da02572015-07-18 05:58:44 +02002934/**
2935 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2936 * @rw_group: Read/Write Group
2937 * @test_bgn: Rank at which the test begins
2938 *
2939 * Stage 3: DQ/DQS Centering.
2940 *
2941 * This function implements UniPHY calibration Stage 3, as explained in
2942 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2943 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002944static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq,
2945 const u32 rw_group,
Marek Vasut2da02572015-07-18 05:58:44 +02002946 const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002947{
Marek Vasut2da02572015-07-18 05:58:44 +02002948 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002949
Marek Vasut2da02572015-07-18 05:58:44 +02002950 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002951
Marek Vasut2da02572015-07-18 05:58:44 +02002952 /* Update info for sims. */
2953 reg_file_set_group(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002954 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2955 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2956
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002957 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0,
2958 1);
Marek Vasut2da02572015-07-18 05:58:44 +02002959 if (ret)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002960 set_failing_group_stage(seq, rw_group,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002961 CAL_STAGE_VFIFO_AFTER_WRITES,
2962 CAL_SUBSTAGE_VFIFO_CENTER);
Marek Vasut2da02572015-07-18 05:58:44 +02002963 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002964}
2965
Marek Vasuta3581272015-07-21 06:18:57 +02002966/**
2967 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2968 *
2969 * Stage 4: Minimize latency.
2970 *
2971 * This function implements UniPHY calibration Stage 4, as explained in
2972 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
2973 * Calibrate LFIFO to find smallest read latency.
2974 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002975static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002976{
Marek Vasuta3581272015-07-21 06:18:57 +02002977 int found_one = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002978
2979 debug("%s:%d\n", __func__, __LINE__);
2980
Marek Vasuta3581272015-07-21 06:18:57 +02002981 /* Update info for sims. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002982 reg_file_set_stage(CAL_STAGE_LFIFO);
2983 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2984
2985 /* Load up the patterns used by read calibration for all ranks */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002986 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002987
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002988 do {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002989 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002990 debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002991 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002992
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002993 if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0,
2994 NUM_READ_TESTS,
Marek Vasuta3581272015-07-21 06:18:57 +02002995 PASS_ALL_BITS, 1))
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002996 break;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002997
2998 found_one = 1;
Marek Vasuta3581272015-07-21 06:18:57 +02002999 /*
3000 * Reduce read latency and see if things are
3001 * working correctly.
3002 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003003 seq->gbl.curr_read_lat--;
3004 } while (seq->gbl.curr_read_lat > 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003005
Marek Vasuta3581272015-07-21 06:18:57 +02003006 /* Reset the fifos to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02003007 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003008
3009 if (found_one) {
Marek Vasuta3581272015-07-21 06:18:57 +02003010 /* Add a fudge factor to the read latency that was determined */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003011 seq->gbl.curr_read_lat += 2;
3012 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003013 debug_cond(DLEVEL >= 2,
Marek Vasuta3581272015-07-21 06:18:57 +02003014 "%s:%d lfifo: success: using read_lat=%u\n",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003015 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003016 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003017 set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003018 CAL_SUBSTAGE_READ_LATENCY);
3019
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003020 debug_cond(DLEVEL >= 2,
Marek Vasuta3581272015-07-21 06:18:57 +02003021 "%s:%d lfifo: failed at initial read_lat=%u\n",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003022 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003023 }
Marek Vasuta3581272015-07-21 06:18:57 +02003024
3025 return found_one;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003026}
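
/*
 * Worked example (illustrative) of the LFIFO search above: with a
 * hypothetical t_rl_add of 7, mem_init_latency() starts the search at
 * curr_read_lat = 23. The loop decrements the latency while the
 * back-to-back reads still pass; if the first failure happens at 12,
 * the value written back is 12 + 2 = 14 AFI clocks.
 */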
3027
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003028/**
3029 * search_window() - Search for the/part of the window with DM/DQS shift
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003030 * @search_dm: If 1, search for the DM shift, if 0, search for DQS
3031 * shift
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003032 * @rank_bgn: Rank number
3033 * @write_group: Write Group
3034 * @bgn_curr: Current window begin
3035 * @end_curr: Current window end
3036 * @bgn_best: Current best window begin
3037 * @end_best: Current best window end
3038 * @win_best: Size of the best window
3039 * @new_dqs: New DQS value (only applicable if search_dm = 0).
3040 *
3041 * Search for the window (or part of it) with DM/DQS shift.
3042 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003043static void search_window(struct socfpga_sdrseq *seq,
3044 const int search_dm, const u32 rank_bgn,
3045 const u32 write_group, int *bgn_curr, int *end_curr,
3046 int *bgn_best, int *end_best, int *win_best,
3047 int new_dqs)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003048{
3049 u32 bit_chk;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003050 const int max = seq->iocfg->io_out1_delay_max - new_dqs;
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003051 int d, di;
3052
3053 /* Search for the window (or part of it) with DM/DQS shift. */
3054 for (di = max; di >= 0; di -= DELTA_D) {
3055 if (search_dm) {
3056 d = di;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003057 scc_mgr_apply_group_dm_out1_delay(seq, d);
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003058 } else {
3059 /* For DQS, we go from 0...max */
3060 d = max - di;
3061 /*
Marek Vasutc85b9b32015-08-02 19:47:01 +02003062 * Note: This only shifts DQS, so we may be limiting
3063 * ourselves to the width of DQ unnecessarily.
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003064 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003065 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
3066 write_group,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003067 d + new_dqs);
3068 }
3069
3070 writel(0, &sdr_scc_mgr->update);
3071
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003072 if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group,
3073 1, PASS_ALL_BITS, &bit_chk,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003074 0)) {
3075 /* Set current end of the window. */
3076 *end_curr = search_dm ? -d : d;
3077
3078 /*
3079 * If a starting edge of our window has not been seen
3080 * this is our current start of the DM window.
3081 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003082 if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003083 *bgn_curr = search_dm ? -d : d;
3084
3085 /*
3086 * If current window is bigger than best seen.
3087 * Set best seen to be current window.
3088 */
3089 if ((*end_curr - *bgn_curr + 1) > *win_best) {
3090 *win_best = *end_curr - *bgn_curr + 1;
3091 *bgn_best = *bgn_curr;
3092 *end_best = *end_curr;
3093 }
3094 } else {
3095 /* We just saw a failing test. Reset temp edge. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003096 *bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3097 *end_curr = seq->iocfg->io_out1_delay_max + 1;
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003098
3099 /* Early exit is only applicable to DQS. */
3100 if (search_dm)
3101 continue;
3102
3103 /*
3104 * Early exit optimization: if the remaining delay
3105 * chain space is less than already seen largest
3106 * window we can exit.
3107 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003108 if (*win_best - 1 > seq->iocfg->io_out1_delay_max
3109 - new_dqs - d)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003110 break;
3111 }
3112 }
3113}
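
/*
 * Worked example (illustrative) of the early-exit check above: suppose
 * io_out1_delay_max - new_dqs = 20 and win_best = 15. On a failure at
 * d = 7 the remaining taps 8..20 can hold a window of at most 13 taps;
 * since 15 - 1 = 14 > 20 - 7 = 13, the loop exits because no remaining
 * window can beat the 15-tap best already seen.
 */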
3114
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003115/**
Marek Vasut2595b242015-07-21 05:33:49 +02003116 * rw_mgr_mem_calibrate_writes_center() - Center all windows
3117 * @rank_bgn: Rank number
3118 * @write_group: Write group
3119 * @test_bgn: Rank at which the test begins
3120 *
3121 * Center all windows. Do per-bit-deskew to possibly increase size of
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003122 * certain windows.
3123 */
Marek Vasutaffbc892015-07-21 05:00:42 +02003124static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003125rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq,
3126 const u32 rank_bgn, const u32 write_group,
Marek Vasutaffbc892015-07-21 05:00:42 +02003127 const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003128{
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003129 int i;
Marek Vasutaffbc892015-07-21 05:00:42 +02003130 u32 sticky_bit_chk;
3131 u32 min_index;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003132 int left_edge[seq->rwcfg->mem_dq_per_write_dqs];
3133 int right_edge[seq->rwcfg->mem_dq_per_write_dqs];
Marek Vasutaffbc892015-07-21 05:00:42 +02003134 int mid;
3135 int mid_min, orig_mid_min;
3136 int new_dqs, start_dqs;
3137 int dq_margin, dqs_margin, dm_margin;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003138 int bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3139 int end_curr = seq->iocfg->io_out1_delay_max + 1;
3140 int bgn_best = seq->iocfg->io_out1_delay_max + 1;
3141 int end_best = seq->iocfg->io_out1_delay_max + 1;
Marek Vasutaffbc892015-07-21 05:00:42 +02003142 int win_best = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003143
Marek Vasutb20a5062015-07-13 02:11:02 +02003144 int ret;
3145
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003146 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
3147
3148 dm_margin = 0;
3149
Marek Vasut1bb221e2015-07-21 05:29:05 +02003150 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
3151 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003152 (seq->rwcfg->mem_dq_per_write_dqs << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003153
Marek Vasutaffbc892015-07-21 05:00:42 +02003154 /* Per-bit deskew. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003155
3156 /*
Marek Vasutaffbc892015-07-21 05:00:42 +02003157 * Set the left and right edge of each bit to an illegal value.
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003158 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003159 */
3160 sticky_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003161 for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
3162 left_edge[i] = seq->iocfg->io_out1_delay_max + 1;
3163 right_edge[i] = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003164 }
3165
Marek Vasutaffbc892015-07-21 05:00:42 +02003166 /* Search for the left edge of the window for each bit. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003167 search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn,
Marek Vasutb69c2472015-07-18 20:34:00 +02003168 &sticky_bit_chk,
Marek Vasute624caf2015-07-13 02:38:15 +02003169 left_edge, right_edge, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003170
Marek Vasutaffbc892015-07-21 05:00:42 +02003171 /* Search for the right edge of the window for each bit. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003172 ret = search_right_edge(seq, 1, rank_bgn, write_group, 0,
Marek Vasutb20a5062015-07-13 02:11:02 +02003173 start_dqs, 0,
Marek Vasutb69c2472015-07-18 20:34:00 +02003174 &sticky_bit_chk,
Marek Vasutb20a5062015-07-13 02:11:02 +02003175 left_edge, right_edge, 0);
3176 if (ret) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003177 set_failing_group_stage(seq, test_bgn + ret - 1,
3178 CAL_STAGE_WRITES,
Marek Vasutb20a5062015-07-13 02:11:02 +02003179 CAL_SUBSTAGE_WRITES_CENTER);
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003180 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003181 }
3182
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003183 min_index = get_window_mid_index(seq, 1, left_edge, right_edge,
3184 &mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003185
Marek Vasutaffbc892015-07-21 05:00:42 +02003186 /* Determine the amount we can change DQS (which is -mid_min). */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003187 orig_mid_min = mid_min;
3188 new_dqs = start_dqs;
3189 mid_min = 0;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003190 debug_cond(DLEVEL >= 1,
Marek Vasutaffbc892015-07-21 05:00:42 +02003191 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3192 __func__, __LINE__, start_dqs, new_dqs, mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003193
Marek Vasut89feb502015-07-18 19:46:26 +02003194 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003195 center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasut89feb502015-07-18 19:46:26 +02003196 min_index, 0, &dq_margin, &dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003197
3198 /* Move DQS */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003199 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Marek Vasutb5450962015-07-12 21:05:08 +02003200 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003201
3202 /* Centre DM */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003203 debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003204
Marek Vasutaffbc892015-07-21 05:00:42 +02003205 /* Search for the/part of the window with DM shift. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003206 search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003207 &bgn_best, &end_best, &win_best, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003208
Marek Vasutaffbc892015-07-21 05:00:42 +02003209 /* Reset DM delay chains to 0. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003210 scc_mgr_apply_group_dm_out1_delay(seq, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003211
3212 /*
3213 * Check to see if the current window nudges up against 0 delay.
3214 * If so we need to continue the search by shifting DQS; otherwise
3215 * the DQS search begins as a new search.
3216 */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003217 if (end_curr != 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003218 bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3219 end_curr = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003220 }
3221
Marek Vasutaffbc892015-07-21 05:00:42 +02003222 /* Search for the/part of the window with DQS shifts. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003223 search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003224 &bgn_best, &end_best, &win_best, new_dqs);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003225
Marek Vasutaffbc892015-07-21 05:00:42 +02003226 /* Assign left and right edge for cal and reporting. */
3227 left_edge[0] = -1 * bgn_best;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003228 right_edge[0] = end_best;
3229
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003230 debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n",
Marek Vasutaffbc892015-07-21 05:00:42 +02003231 __func__, __LINE__, left_edge[0], right_edge[0]);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003232
Marek Vasutaffbc892015-07-21 05:00:42 +02003233 /* Move DQS (back to orig). */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003234 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003235
3236 /* Move DM */
3237
Marek Vasutaffbc892015-07-21 05:00:42 +02003238 /* Find middle of window for the DM bit. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003239 mid = (left_edge[0] - right_edge[0]) / 2;
3240
Marek Vasutaffbc892015-07-21 05:00:42 +02003241 /* Only move right, since we are not moving DQS/DQ. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003242 if (mid < 0)
3243 mid = 0;
3244
Marek Vasutaffbc892015-07-21 05:00:42 +02003245 /* dm_marign should fail if we never find a window. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003246 if (win_best == 0)
3247 dm_margin = -1;
3248 else
3249 dm_margin = left_edge[0] - mid;
3250
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003251 scc_mgr_apply_group_dm_out1_delay(seq, mid);
Marek Vasutb5450962015-07-12 21:05:08 +02003252 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003253
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003254 debug_cond(DLEVEL >= 2,
Marek Vasutaffbc892015-07-21 05:00:42 +02003255 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3256 __func__, __LINE__, left_edge[0], right_edge[0],
3257 mid, dm_margin);
3258 /* Export values. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003259 seq->gbl.fom_out += dq_margin + dqs_margin;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003260
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003261 debug_cond(DLEVEL >= 2,
Marek Vasutaffbc892015-07-21 05:00:42 +02003262 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3263 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003264
3265 /*
3266 * Do not remove this line as it makes sure all of our
3267 * decisions have been applied.
3268 */
Marek Vasutb5450962015-07-12 21:05:08 +02003269 writel(0, &sdr_scc_mgr->update);
Marek Vasutaffbc892015-07-21 05:00:42 +02003270
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003271 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3272 return -EINVAL;
3273
3274 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003275}
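
/*
 * Worked example (illustrative) for the DM centering arithmetic above:
 * with left_edge[0] = 10 and right_edge[0] = 4, mid = (10 - 4) / 2 = 3
 * and dm_margin = 10 - 3 = 7, so the DM out1 delay is set to 3 and a
 * margin of 7 is reported. A win_best of 0 instead forces
 * dm_margin = -1, which fails the group with -EINVAL.
 */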
3276
Marek Vasut4a78cc72015-07-18 07:23:25 +02003277/**
3278 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3279 * @rank_bgn: Rank number
3280 * @group: Read/Write Group
3281 * @test_bgn: Rank at which the test begins
3282 *
3283 * Stage 2: Write Calibration Part One.
3284 *
3285 * This function implements UniPHY calibration Stage 2, as explained in
3286 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
3287 */
static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq,
				       const u32 rank_bgn, const u32 group,
				       const u32 test_bgn)
{
	int ret;

	/* Update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);

	reg_file_set_group(group);
	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group,
						 test_bgn);
	if (ret)
		set_failing_group_stage(seq, group, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);

	return ret;
}

/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(struct socfpga_sdrseq *seq)
{
	int r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* Set rank. */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(seq->rwcfg->activate_0_and_1_wait1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(seq->rwcfg->activate_0_and_1_wait2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
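		/*
		 * The two counter/jump pairs above follow the usual RW
		 * manager idiom: program a loop counter, point the matching
		 * jump address at a sequencer ROM fragment, then kick the
		 * engine by writing an opcode to the run-group address.
		 * A minimal sketch of the pattern (count and opcode values
		 * hypothetical):
		 *
		 *	writel(count, &sdr_rw_load_mgr_regs->load_cntr0);
		 *	writel(wait_opcode,
		 *	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		 *	writel(run_opcode, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		 *	       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		 */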

		/* Activate rows. */
		writel(seq->rwcfg->activate_0_and_1,
		       SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}

/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(struct socfpga_sdrseq *seq)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << seq->misccfg->max_latency_count_width)
				- 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	seq->gbl.rw_wl_nop_cycles = wlat - 1;
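	/*
	 * Worked example with hypothetical register values: if t_wl_add
	 * reads 5 and mem_t_add reads 2, then wlat = 7 AFI clocks and
	 * rw_wl_nop_cycles = 6, assuming one of the wlat cycles is
	 * consumed by issuing the write command itself.
	 */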

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	seq->gbl.curr_read_lat = rlat + 16;
	if (seq->gbl.curr_read_lat > max_latency)
		seq->gbl.curr_read_lat = max_latency;

	writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
}
3382
Marek Vasut60daef82015-07-26 10:54:15 +02003383/**
3384 * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3385 *
3386 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3387 */
static void mem_skip_calibrate(struct socfpga_sdrseq *seq)
{
	u32 vfifo_offset;
	u32 i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
			if (seq->iocfg->dll_chain_length == 6)
				scc_mgr_set_dqdqs_output_phase(i, 6);
			else
				scc_mgr_set_dqdqs_output_phase(i, 7);
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before
			 * write latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg.
			 * The minimum ptap of the OPA is 180 deg.
			 * Each ptap has (360 / seq->iocfg->dll_chain_length)
			 * deg of delay.
			 * The write_clk is always delayed by 2 ptaps.
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *	720 - 90 - 180 -
			 *	2 * (360 / seq->iocfg->dll_chain_length) deg
			 *
			 * Dividing the above by
			 * (360 / seq->iocfg->dll_chain_length)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *	(1.25 * seq->iocfg->dll_chain_length - 2)
			 */
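			/*
			 * Worked example: with dll_chain_length = 8, one
			 * ptap is 360 / 8 = 45 deg. The delay needed is
			 * 720 - 90 - 180 = 450 deg, i.e. 10 ptaps, minus
			 * the 2 ptaps write_clk is already delayed by,
			 * giving 8 ptaps. The integer form below agrees:
			 * (125 * 8) / 100 - 2 = 8.
			 */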
			scc_mgr_set_dqdqs_output_phase(i,
					((125 * seq->iocfg->dll_chain_length)
					/ 100) - 2);
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * Arria V has hard FIFOs that can only be initialized by
	 * incrementing them in the sequencer.
	 */
	vfifo_offset = seq->misccfg->calib_vfifo_offset;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	 * setting from a generation-time constant.
	 */
	seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset;
	writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
}

/**
 * mem_calibrate() - Memory calibration entry point.
 *
 * Perform memory calibration.
 */
static u32 mem_calibrate(struct socfpga_sdrseq *seq)
{
	u32 i;
	u32 rank_bgn, sr;
	u32 write_group, write_test_bgn;
	u32 read_group, read_test_bgn;
	u32 run_groups, current_run;
	u32 failing_groups = 0;
	u32 group_failed = 0;

	const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width /
				seq->rwcfg->mem_if_write_dqs_width;
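	/*
	 * Example mapping (hypothetical widths): with a read DQS width of 8
	 * and a write DQS width of 4, rwdqs_ratio = 2 and write group N
	 * covers read groups 2N and 2N + 1 in the loops below.
	 */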

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings */
	seq->gbl.error_substage = CAL_SUBSTAGE_NIL;
	seq->gbl.error_stage = CAL_STAGE_NIL;
	seq->gbl.error_group = 0xff;
	seq->gbl.fom_in = 0;
	seq->gbl.fom_out = 0;

	/* Initialize WLAT and RLAT. */
	mem_init_latency(seq);

	/* Initialize bit slips. */
	mem_precharge_and_activate(seq);

	for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
		if (i == 0)
			scc_mgr_set_hhp_extras();

		scc_set_bypass_mode(i);
	}

	/* Calibration is skipped. */
	if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate(seq);

		/*
		 * Do not remove this line as it makes sure all of our
		 * decisions have been applied.
		 */
		writel(0, &sdr_scc_mgr->update);
		return 1;
	}

	/* Calibration is not skipped. */
	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
		/*
		 * Zero all delay chain/phase settings for all
		 * groups and all shadow register sets.
		 */
		scc_mgr_zero_all(seq);

		run_groups = ~0;

		for (write_group = 0, write_test_bgn = 0; write_group
		     < seq->rwcfg->mem_if_write_dqs_width; write_group++,
		     write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) {
			/* Initialize the group failure */
			group_failed = 0;

			current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
			run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;
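			/*
			 * run_groups starts as all-ones and each pass
			 * consumes RW_MGR_NUM_DQS_PER_WRITE_GROUP bits.
			 * E.g. with one DQS per write group the mask is
			 * (1 << 1) - 1 = 0x1, so current_run tests the low
			 * bit before the word is shifted right by one.
			 */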

			if (current_run == 0)
				continue;

			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					    SCC_MGR_GROUP_COUNTER_OFFSET);
			scc_mgr_zero_group(seq, write_group, 0);

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
					continue;

				/* Calibrate the VFIFO */
				if (rw_mgr_mem_calibrate_vfifo(seq, read_group,
							       read_test_bgn))
					continue;

				if (!(seq->gbl.phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* Calibrate the output side */
			for (rank_bgn = 0, sr = 0;
			     rank_bgn < seq->rwcfg->mem_number_of_ranks;
			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				/* Not needed in quick mode! */
				if (STATIC_CALIB_STEPS &
				    CALIB_SKIP_DELAY_SWEEPS)
					continue;

				/* Calibrate WRITEs */
				if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn,
								 write_group,
								 write_test_bgn))
					continue;

				group_failed = 1;
				if (!(seq->gbl.phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;
			}

			/* Some group failed, we're done. */
			if (group_failed)
				goto grp_failed;

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				if (!rw_mgr_mem_calibrate_vfifo_end(seq,
								    read_group,
								    read_test_bgn))
					continue;

				if (!(seq->gbl.phy_debug_mode_flags &
				      PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* No group failed, continue as usual. */
			continue;

grp_failed:		/* A group failed, increment the counter. */
			failing_groups++;
		}

		/*
		 * If there are any failing groups then report
		 * the failure.
		 */
		if (failing_groups != 0)
			return 0;

		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
			continue;

		/* Calibrate the LFIFO */
		if (!rw_mgr_mem_calibrate_lfifo(seq))
			return 0;
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}

/**
 * run_mem_calibrate() - Perform memory calibration
 *
 * This function triggers the entire memory calibration procedure.
 */
static int run_mem_calibrate(struct socfpga_sdrseq *seq)
{
	int pass;
	u32 ctrl_cfg;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager. */
	ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
	writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
	       &sdr_ctrl->ctrl_cfg);
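	/*
	 * The unmodified ctrl_cfg value is kept so the DQS tracking enable
	 * bit can be restored once calibration finishes (see the matching
	 * write at the end of this function).
	 */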

	phy_mgr_initialize(seq);
	rw_mgr_mem_initialize(seq);

	/* Perform the actual memory calibration. */
	pass = mem_calibrate(seq);

	mem_precharge_and_activate(seq);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/* Handoff. */
	rw_mgr_mem_handoff(seq);
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x2, &phy_mgr_cfg->mux_sel);

	/* Start tracking manager. */
	writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);

	return pass;
}

/**
 * debug_mem_calibrate() - Report result of memory calibration
 * @pass:	Value indicating whether calibration passed or failed
 *
 * This function reports the results of the memory calibration
 * and writes debug information into the register file.
 */
static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass)
{
	u32 debug_info;

	if (pass) {
		debug(KBUILD_BASENAME ": CALIBRATION PASSED\n");

		seq->gbl.fom_in /= 2;
		seq->gbl.fom_out /= 2;

		if (seq->gbl.fom_in > 0xff)
			seq->gbl.fom_in = 0xff;

		if (seq->gbl.fom_out > 0xff)
			seq->gbl.fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = seq->gbl.fom_in;
		debug_info |= seq->gbl.fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		debug(KBUILD_BASENAME ": CALIBRATION FAILED\n");

		debug_info = seq->gbl.error_stage;
		debug_info |= seq->gbl.error_substage << 8;
		debug_info |= seq->gbl.error_group << 16;
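		/*
		 * Packing example with hypothetical values: error_stage 0x03,
		 * error_substage 0x01 and error_group 0x02 yield
		 * debug_info = 0x03 | (0x01 << 8) | (0x02 << 16) = 0x00020103.
		 */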

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = seq->gbl.error_stage;
		debug_info |= seq->gbl.error_substage << 8;
		debug_info |= seq->gbl.error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	debug(KBUILD_BASENAME ": Calibration complete\n");
}

/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	unsigned int nelem = 0;
	const u32 *rom_init;
	u32 i, addr;

	socfpga_get_seq_inst_init(&rom_init, &nelem);
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < nelem; i++)
		writel(rom_init[i], addr + (i << 2));
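	/*
	 * rom_init[] holds 32-bit words, so (i << 2) converts the word
	 * index into the byte offset of the memory-mapped ROM slot, e.g.
	 * word 3 lands at byte offset 12.
	 */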

	socfpga_get_seq_ac_init(&rom_init, &nelem);
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < nelem; i++)
		writel(rom_init[i], addr + (i << 2));
}

/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(struct socfpga_sdrseq *seq)
{
	/* Initialize the register file with the correct data */
	writel(seq->misccfg->reg_file_init_seq_signature,
	       &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}

/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	u32 reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	u32 trk_sample_count = 7500;
	u32 trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */
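	/*
	 * Worked example: (10 << 16) | 100 packs to 0x000A0064, i.e.
	 * 10 outer loops and a long-idle sample count of 100.
	 */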

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}

/**
 * initialize_tracking() - Initialize tracking
 *
 * Initialize the register file with usable initial data.
 */
static void initialize_tracking(struct socfpga_sdrseq *seq)
{
	/*
	 * Initialize the register file with the correct data.
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
	writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
			    seq->iocfg->delay_per_dchain_tap) - 1,
	       &sdr_reg_file->dtaps_per_ptap);
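	/*
	 * Worked example with hypothetical tap delays: if one phase tap
	 * (opa tap) is 625 ps and one delay-chain tap is 25 ps, then
	 * DIV_ROUND_UP(625, 25) - 1 = 24 dtaps span a ptap.
	 */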

	/* trk_sample_count */
	writel(7500, &sdr_reg_file->trk_sample_count);

	/* longidle outer loop [15:0] */
	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);

	/*
	 * longidle sample count [31:24]
	 * trfc, worst case of 933 MHz 4Gb [23:16]
	 * trcd, worst case [15:8]
	 * vfifo wait [7:0]
	 */
	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
	       &sdr_reg_file->delays);
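	/*
	 * The packed word above is (243 << 24) | (14 << 16) | (10 << 8) | 4,
	 * i.e. 0xF30E0A04: 243 long-idle samples, a trfc of 14, a trcd of 10
	 * and a vfifo wait of 4.
	 */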

	/* mux delay */
	if (dram_is_ddr(2)) {
		writel(0, &sdr_reg_file->trk_rw_mgr_addr);
	} else if (dram_is_ddr(3)) {
		writel((seq->rwcfg->idle << 24) |
		       (seq->rwcfg->activate_1 << 16) |
		       (seq->rwcfg->sgle_read << 8) |
		       (seq->rwcfg->precharge_all << 0),
		       &sdr_reg_file->trk_rw_mgr_addr);
	}

	writel(seq->rwcfg->mem_if_read_dqs_width,
	       &sdr_reg_file->trk_read_dqs_width);

	/* trefi [7:0] */
	if (dram_is_ddr(2)) {
		writel(1000 << 0, &sdr_reg_file->trk_rfsh);
	} else if (dram_is_ddr(3)) {
		writel((seq->rwcfg->refresh_all << 24) | (1000 << 0),
		       &sdr_reg_file->trk_rfsh);
	}
}

int sdram_calibration_full(struct socfpga_sdr *sdr)
{
	u32 pass;
	struct socfpga_sdrseq seq;

	/*
	 * For size reasons, this file uses hard coded addresses.
	 * Check if we are called with the correct address.
	 */
	if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS)
		return -ENODEV;

	memset(&seq, 0, sizeof(seq));

	seq.rwcfg = socfpga_get_sdram_rwmgr_config();
	seq.iocfg = socfpga_get_sdram_io_config();
	seq.misccfg = socfpga_get_sdram_misc_config();

	/* Set the calibration enabled by default */
	seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Sweep all groups by default (regardless of their failure state);
	 * the read test is also enabled by default.
	 */
#if DISABLE_GUARANTEED_READ
	seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file(&seq);

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking(&seq);

	debug(KBUILD_BASENAME ": Preparing to start memory calibration\n");

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL >= 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   seq.rwcfg->mem_number_of_ranks,
		   seq.rwcfg->mem_number_of_cs_per_dimm,
		   seq.rwcfg->mem_dq_per_read_dqs,
		   seq.rwcfg->mem_dq_per_write_dqs,
		   seq.rwcfg->mem_virtual_groups_per_read_dqs,
		   seq.rwcfg->mem_virtual_groups_per_write_dqs);
	debug_cond(DLEVEL >= 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   seq.rwcfg->mem_if_read_dqs_width,
		   seq.rwcfg->mem_if_write_dqs_width,
		   seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width,
		   seq.iocfg->delay_per_opa_tap,
		   seq.iocfg->delay_per_dchain_tap);
	debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u",
		   seq.iocfg->delay_per_dqs_en_dchain_tap,
		   seq.iocfg->dll_chain_length);
	debug_cond(DLEVEL >= 1,
		   "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max,
		   seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max);
	debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max,
		   seq.iocfg->io_out2_delay_max);
	debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve);

	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	seq.dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		seq.skip_delay_mask = 0xff;
	else
		seq.skip_delay_mask = 0x0;

	pass = run_mem_calibrate(&seq);
	debug_mem_calibrate(&seq, pass);
	return pass;
}