// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright Altera Corporation (C) 2012-2015
 */

#include <log.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include <hang.h>
#include "sequencer.h"

static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs
	= (struct socfpga_sdr_rw_load_jump_manager *)
		(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static const struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
		(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
		(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static const struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static const struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & seq->skip_delay_mask)

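/*
 * Illustration (not part of the upstream driver): seq->skip_delay_mask
 * is expected to be all-ones during normal operation and zero when the
 * delay loops are skipped for fast simulation, so a counter load such as
 *
 *	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff),
 *	       &sdr_rw_load_mgr_regs->load_cntr1);
 *
 * programs a full 0xff count on hardware but collapses to 0 in
 * fast-sim mode.
 */
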
bool dram_is_ddr(const u8 ddr)
{
	const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config();
	const u8 type = (cfg->ctrl_cfg >> SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB) &
			SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK;

	if (ddr == 2 && type == 1)	/* DDR2 */
		return true;

	if (ddr == 3 && type == 2)	/* DDR3 */
		return true;

	return false;
}

static void set_failing_group_stage(struct socfpga_sdrseq *seq,
				    u32 group, u32 stage, u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (seq->gbl.error_stage == CAL_STAGE_NIL) {
		seq->gbl.error_substage = substage;
		seq->gbl.error_stage = stage;
		seq->gbl.error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(struct socfpga_sdrseq *seq)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable; we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = seq->rwcfg->mem_dq_per_read_dqs /
		seq->rwcfg->mem_virtual_groups_per_read_dqs;
	seq->param.read_correct_mask_vg = (1 << ratio) - 1;
	seq->param.write_correct_mask_vg = (1 << ratio) - 1;
	seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs)
				       - 1;
	seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs)
					- 1;
}

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank: Rank mask
 * @odt_mode: ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq,
				  const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (seq->rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
				if (dram_is_ddr(2))
					odt_mask_1 &= ~(1 << rank);
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * DDR3 Read, DDR2 Read/Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 *           +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * DDR3 Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x4;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x8;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x1;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x2;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

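/*
 * Worked example (illustration only, not upstream code): for a write to
 * rank 0 of a 4-rank DDR3 system, the tables above give odt_mask_0 = 0x4
 * (read ODT) and odt_mask_1 = 0x5 (write ODT), so the register value
 * packs up as:
 *
 *	(0xFF & ~(1 << 0))  ->  0x0000FE   (chip selects)
 *	(0x4 << 8)          ->  0x000400   (read ODT)
 *	(0x5 << 16)         ->  0x050000   (write ODT)
 *	cs_and_odt_mask     ==  0x0504FE
 */
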
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off: Base offset in SCC Manager space
 * @grp: Read/Write group
 * @val: Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

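/*
 * Illustration (not upstream code): scc_mgr_set() folds a register bank
 * and a group index into a single MMIO address, the group scaled by
 * 4 bytes. Setting the DQS enable phase of read group 3 to 2, for
 * instance, amounts to:
 *
 *	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, 3, 2);
 *	    == writel(2, SDR_PHYGRP_SCCGRP_ADDRESS
 *			 | SCC_MGR_DQS_EN_PHASE_OFFSET | (3 << 2));
 */
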
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq,
					u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm,
				    u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq,
				       u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm,
				      u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off: Base offset in SCC Manager space
 * @grp: Read/Write group
 * @val: Value to be set
 * @update: If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq,
				  const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq,
						     u32 write_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group: Write group
 * @delay: Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq,
				       const u32 write_group, const u32 delay)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

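/*
 * For reference (illustration only): with the bit assignments listed in
 * scc_mgr_set_hhp_extras(), the constant written to the register works
 * out to
 *
 *	value = (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0) = 0x27
 */
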
/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(struct socfpga_sdrseq *seq)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i,
						     seq->iocfg->dqs_in_reserve);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(seq, i,
						   seq->iocfg->dqs_out_reserve);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group: Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group: Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq,
					     const u32 write_group)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(struct socfpga_sdrseq *seq,
			       const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			if (!out_only)
				scc_mgr_set_dm_in_delay(seq, i, 0);
			scc_mgr_set_dm_out1_delay(seq, i, 0);
		}

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(seq, 0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve);
		scc_mgr_set_oct_out1_delay(seq, write_group,
					   seq->iocfg->dqs_out_reserve);
		scc_mgr_load_dqs_for_write_group(seq, write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq,
					    u32 group_bgn, u32 delay)
{
	u32 i, p;

	for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs;
	     i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the
 * DQ pins in a group
 * @delay: Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq,
					      const u32 delay)
{
	int i;

	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq,
					      u32 delay1)
{
	u32 i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(seq, i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq,
						    u32 write_group, u32 delay)
{
	scc_mgr_set_dqs_out1_delay(seq, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(seq, write_group, delay);
	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output
 * side: DQ, DM, DQS, OCT
 * @write_group: Write group
 * @delay: Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq,
						  const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_dqs_out1_delay(seq, new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_oct_out1_delay(seq, write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the
 * entire output side to all ranks
 * @write_group: Write group
 * @delay: Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq,
						const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom; could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(struct socfpga_sdrseq *seq)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(seq->rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @clocks: Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq,
				   const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(seq->rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

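/*
 * Worked example (illustration only, assuming afi_rate_ratio == 2):
 * delay_for_n_mem_clocks(seq, 512) scales to afi_clocks =
 * DIV_ROUND_UP(512, 2) - 1 = 255, which fits in 8 bits, so only the
 * single inner loop is used with cntr1 = 255. A request of 0x40000
 * clocks instead gives afi_clocks = 0x1FFFF, i.e. c_loop = 1,
 * outer = 0xff, inner = 0xff, and the do/while issues the nested idle
 * loop twice.
 */
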
static void delay_for_n_ns(struct socfpga_sdrseq *seq, const u32 ns)
{
	delay_for_n_mem_clocks(seq, (ns * seq->misccfg->afi_clk_freq *
				     seq->misccfg->afi_rate_ratio) / 1000);
}

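/*
 * Illustration (assumes afi_clk_freq is expressed in MHz): a 200 ns
 * delay on a 266 MHz AFI clock with afi_rate_ratio == 2 evaluates to
 * (200 * 266 * 2) / 1000 = 106 clocks handed to delay_for_n_mem_clocks(),
 * which then divides the afi_rate_ratio back out.
 */
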
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0: Counter 0 value
 * @cntr1: Counter 1 value
 * @cntr2: Counter 2 value
 * @jump: Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq,
				      u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user_ddr2() - Load user calibration values for DDR2
 * @handoff: Indicate whether this is initialization or handoff phase
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr2(struct socfpga_sdrseq *seq,
				      const int handoff)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->emr2, grpaddr);
		writel(seq->rwcfg->emr3, grpaddr);
		writel(seq->rwcfg->emr, grpaddr);

		if (handoff) {
			writel(seq->rwcfg->mr_user, grpaddr);
			continue;
		}

		writel(seq->rwcfg->mr_dll_reset, grpaddr);

		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);
		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);

		writel(seq->rwcfg->mr_calib, grpaddr);
		writel(/*seq->rwcfg->*/0x0b, grpaddr);	/* EMR_OCD_ENABLE */
		writel(seq->rwcfg->emr, grpaddr);
		delay_for_n_mem_clocks(seq, 200);
	}
}

/**
 * rw_mgr_mem_load_user_ddr3() - Load user calibration values
 * @fin1: Final instruction 1
 * @fin2: Final instruction 2
 * @precharge: If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr3(struct socfpga_sdrseq *seq,
				      const u32 fin1, const u32 fin2,
				      const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(seq->rwcfg->precharge_all, grpaddr);

		/*
		 * USER Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1, grpaddr);
			set_jump_as_return(seq);
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return(seq);
		writel(seq->rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(seq, 512);
	}
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1: Final instruction 1
 * @fin2: Final instruction 2
 * @precharge: If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq,
				 const u32 fin1, const u32 fin2,
				 const int precharge)
{
	if (dram_is_ddr(2))
		rw_mgr_mem_load_user_ddr2(seq, precharge);
	else if (dram_is_ddr(3))
		rw_mgr_mem_load_user_ddr3(seq, fin1, fin2, precharge);
	else
		hang();
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	if (dram_is_ddr(3)) {
		writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
	}

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump addresses are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
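	/*
	 * Illustrative check of the formula above (not upstream text):
	 * n = 0, a = 256 (cntr = 0xFF), b = 106 (cntr = 0x6A) gives
	 * ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which covers the
	 * ~54000 cycles needed for tINIT.
	 */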
	rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val,
				  seq->misccfg->tinit_cntr1_val,
				  seq->misccfg->tinit_cntr2_val,
				  seq->rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	if (dram_is_ddr(2)) {
		writel(seq->rwcfg->nop, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		/* Bring up clock enable. */

		/* tXRP < 400 ck cycles */
		delay_for_n_ns(seq, 400);
	} else if (dram_is_ddr(3)) {
		/*
		 * transition the RESET to high
		 * Wait for 500us
		 */

		/*
		 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
		 * If a and b are the number of iterations in 2 nested loops
		 * it takes the following number of cycles to complete the
		 * operation number_of_cycles = ((2 + n) * a + 2) * b
		 * where n is the number of instructions in the inner loop
		 * One possible solution is
		 * n = 2 , a = 131 , b = 256 => a = 83, b = FF
		 */
		rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val,
					  seq->misccfg->treset_cntr1_val,
					  seq->misccfg->treset_cntr2_val,
					  seq->rwcfg->init_reset_1_cke_0);
		/* Bring up clock enable. */

		/* tXRP < 250 ck cycles */
		delay_for_n_mem_clocks(seq, 250);
	}

	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr,
			     seq->rwcfg->mrs0_dll_reset, 0);
}

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq)
{
	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr,
			     seq->rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff so it's okay.
	 */
}

Marek Vasutadbaa2d2015-07-21 06:00:36 +02001139/**
1140 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
1141 * @group: Write Group
1142 * @use_dm: Use DM
1143 *
1144 * Issue write test command. Two variants are provided, one that just tests
1145 * a write pattern and another that tests datamask functionality.
Marek Vasut0b97c422015-07-21 05:43:37 +02001146 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001147static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq,
1148 u32 group, u32 test_dm)
Marek Vasut0b97c422015-07-21 05:43:37 +02001149{
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001150 const u32 quick_write_mode =
1151 (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001152 seq->misccfg->enable_super_quick_calibration;
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001153 u32 mcc_instruction;
1154 u32 rw_wl_nop_cycles;
Marek Vasut0b97c422015-07-21 05:43:37 +02001155
	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 * (2 and 3). One jump-counter (0) is used to perform multiple
	 * write-read operations.
	 * One counter is left to issue this command in "multiple-group" mode.
	 */
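	/*
	 * Counter semantics used below (a sketch; the exact behaviour is
	 * defined by the RW Mgr RTL): loading a jump-counter with 0xFF
	 * makes the corresponding jump effectively "always taken",
	 * loading it with 0 makes execution fall through to the next
	 * micro-instruction, and loading it with k runs the loop k + 1
	 * times (hence the "minus 1" in the third case below).
	 */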
1181
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001182 rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles;
Marek Vasut0b97c422015-07-21 05:43:37 +02001183
1184 if (rw_wl_nop_cycles == -1) {
1185 /*
1186 * CNTR 2 - We want to execute the special write operation that
1187 * turns on DQS right away and then skip directly to the
1188 * instruction that sends out the data. We set the counter to a
1189 * large number so that the jump is always taken.
1190 */
1191 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1192
1193 /* CNTR 3 - Not used */
1194 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001195 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
1196 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data,
Marek Vasut0b97c422015-07-21 05:43:37 +02001197 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001198 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasut0b97c422015-07-21 05:43:37 +02001199 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1200 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001201 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1;
1202 writel(seq->rwcfg->lfsr_wr_rd_bank_0_data,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001203 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001204 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001205 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001206 }
1207 } else if (rw_wl_nop_cycles == 0) {
1208 /*
1209 * CNTR 2 - We want to skip the NOP operation and go straight
1210 * to the DQS enable instruction. We set the counter to a large
1211 * number so that the jump is always taken.
1212 */
1213 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1214
1215 /* CNTR 3 - Not used */
1216 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001217 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1218 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
Marek Vasut0b97c422015-07-21 05:43:37 +02001219 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1220 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001221 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1222 writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001223 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Marek Vasut0b97c422015-07-21 05:43:37 +02001224 }
1225 } else {
1226 /*
1227 * CNTR 2 - In this case we want to execute the next instruction
1228 * and NOT take the jump. So we set the counter to 0. The jump
1229 * address doesn't count.
1230 */
1231 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
1232 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1233
1234 /*
1235 * CNTR 3 - Set the nop counter to the number of cycles we
1236 * need to loop for, minus 1.
1237 */
1238 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
1239 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001240 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1241 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001242 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001243 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001244 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1245 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001246 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasut0b97c422015-07-21 05:43:37 +02001247 }
1248 }
1249
1250 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1251 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1252
1253 if (quick_write_mode)
1254 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
1255 else
1256 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
1257
1258 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1259
1260 /*
1261 * CNTR 1 - This is used to ensure enough time elapses
1262 * for read data to come back.
1263 */
1264 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
1265
1266 if (test_dm) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001267 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001268 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasut0b97c422015-07-21 05:43:37 +02001269 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001270 writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001271 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasut0b97c422015-07-21 05:43:37 +02001272 }
1273
Marek Vasutadbaa2d2015-07-21 06:00:36 +02001274 writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
1275 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
1276 (group << 2));
Marek Vasut0b97c422015-07-21 05:43:37 +02001277}
1278
Marek Vasutc67d9622015-07-21 05:57:11 +02001279/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001280 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple
1281 * pass
Marek Vasutc67d9622015-07-21 05:57:11 +02001282 * @rank_bgn: Rank number
1283 * @write_group: Write Group
1284 * @use_dm: Use DM
1285 * @all_correct: All bits must be correct in the mask
1286 * @bit_chk: Resulting bit mask after the test
1287 * @all_ranks: Test all ranks
1288 *
 * Test writes; can check for a single-bit pass or a multiple-bit pass.
1290 */
Marek Vasutbc773a12015-07-21 05:54:39 +02001291static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001292rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq,
1293 const u32 rank_bgn, const u32 write_group,
Marek Vasutbc773a12015-07-21 05:54:39 +02001294 const u32 use_dm, const u32 all_correct,
1295 u32 *bit_chk, const u32 all_ranks)
Marek Vasut0b97c422015-07-21 05:43:37 +02001296{
Marek Vasutbc773a12015-07-21 05:54:39 +02001297 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001298 seq->rwcfg->mem_number_of_ranks :
Marek Vasutbc773a12015-07-21 05:54:39 +02001299 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001300 const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs /
1301 seq->rwcfg->mem_virtual_groups_per_write_dqs;
1302 const u32 correct_mask_vg = seq->param.write_correct_mask_vg;
Marek Vasutbc773a12015-07-21 05:54:39 +02001303
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001304 u32 tmp_bit_chk, base_rw_mgr, group;
Marek Vasutbc773a12015-07-21 05:54:39 +02001305 int vg, r;
Marek Vasut0b97c422015-07-21 05:43:37 +02001306
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001307 *bit_chk = seq->param.write_correct_mask;
Marek Vasut0b97c422015-07-21 05:43:37 +02001308
1309 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutbc773a12015-07-21 05:54:39 +02001310 /* Set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001311 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Marek Vasut0b97c422015-07-21 05:43:37 +02001312
1313 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001314 for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1;
Marek Vasutbc773a12015-07-21 05:54:39 +02001315 vg >= 0; vg--) {
1316 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut0b97c422015-07-21 05:43:37 +02001317 writel(0, &phy_mgr_cmd->fifo_reset);
1318
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001319 group = write_group *
1320 seq->rwcfg->mem_virtual_groups_per_write_dqs
1321 + vg;
1322 rw_mgr_mem_calibrate_write_test_issue(seq, group,
1323 use_dm);
Marek Vasut0b97c422015-07-21 05:43:37 +02001324
Marek Vasutbc773a12015-07-21 05:54:39 +02001325 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
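			/*
			 * The RW Mgr status register flags each failing
			 * DQ bit with a 1; invert so that set bits mean
			 * "pass", mask to this virtual group and pack the
			 * per-group results MSB-first into tmp_bit_chk.
			 */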
1326 tmp_bit_chk <<= shift_ratio;
1327 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
Marek Vasut0b97c422015-07-21 05:43:37 +02001328 }
Marek Vasutbc773a12015-07-21 05:54:39 +02001329
Marek Vasut0b97c422015-07-21 05:43:37 +02001330 *bit_chk &= tmp_bit_chk;
1331 }
1332
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001333 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasut0b97c422015-07-21 05:43:37 +02001334 if (all_correct) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001335 debug_cond(DLEVEL >= 2,
Marek Vasutbc773a12015-07-21 05:54:39 +02001336 "write_test(%u,%u,ALL) : %u == %u => %i\n",
1337 write_group, use_dm, *bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001338 seq->param.write_correct_mask,
1339 *bit_chk == seq->param.write_correct_mask);
1340 return *bit_chk == seq->param.write_correct_mask;
Marek Vasut0b97c422015-07-21 05:43:37 +02001341 } else {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001342 debug_cond(DLEVEL >= 2,
Marek Vasutbc773a12015-07-21 05:54:39 +02001343 "write_test(%u,%u,ONE) : %u != %i => %i\n",
1344 write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
Marek Vasut0b97c422015-07-21 05:43:37 +02001345 return *bit_chk != 0x00;
1346 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001347}
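
/*
 * A minimal usage sketch (hypothetical caller, assuming the PASS_ALL_BITS
 * selector from sequencer.h): check that every DQ bit of write group 0,
 * rank 0, passes with the data mask in use.
 *
 *	u32 bit_chk;
 *
 *	if (!rw_mgr_mem_calibrate_write_test(seq, 0, 0, 1, PASS_ALL_BITS,
 *					     &bit_chk, 0))
 *		debug("failing bits: 0x%08x\n",
 *		      seq->param.write_correct_mask & ~bit_chk);
 */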
1348
Marek Vasut55c4d692015-07-18 03:55:07 +02001349/**
1350 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1351 * @rank_bgn: Rank number
1352 * @group: Read/Write Group
1353 * @all_ranks: Test all ranks
1354 *
1355 * Performs a guaranteed read on the patterns we are going to use during a
1356 * read test to ensure memory works.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001357 */
Marek Vasut55c4d692015-07-18 03:55:07 +02001358static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001359rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq,
1360 const u32 rank_bgn, const u32 group,
Marek Vasut55c4d692015-07-18 03:55:07 +02001361 const u32 all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001362{
Marek Vasut55c4d692015-07-18 03:55:07 +02001363 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1364 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1365 const u32 addr_offset =
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001366 (group * seq->rwcfg->mem_virtual_groups_per_read_dqs)
1367 << 2;
Marek Vasut55c4d692015-07-18 03:55:07 +02001368 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001369 seq->rwcfg->mem_number_of_ranks :
Marek Vasut55c4d692015-07-18 03:55:07 +02001370 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001371 const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs /
1372 seq->rwcfg->mem_virtual_groups_per_read_dqs;
1373 const u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001374
Marek Vasut55c4d692015-07-18 03:55:07 +02001375 u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1376 int vg, r;
1377 int ret = 0;
1378
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001379 bit_chk = seq->param.read_correct_mask;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001380
1381 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasut55c4d692015-07-18 03:55:07 +02001382 /* Set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001383 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001384
		/* Load up a constant burst of read commands */
Marek Vasutb5450962015-07-12 21:05:08 +02001386 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001387 writel(seq->rwcfg->guaranteed_read,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001388 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001389
Marek Vasutb5450962015-07-12 21:05:08 +02001390 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001391 writel(seq->rwcfg->guaranteed_read_cont,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001392 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001393
1394 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001395 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
Marek Vasut55c4d692015-07-18 03:55:07 +02001396 vg >= 0; vg--) {
1397 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02001398 writel(0, &phy_mgr_cmd->fifo_reset);
1399 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1400 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001401 writel(seq->rwcfg->guaranteed_read,
Marek Vasut55c4d692015-07-18 03:55:07 +02001402 addr + addr_offset + (vg << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001403
Marek Vasutb5450962015-07-12 21:05:08 +02001404 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Marek Vasut55c4d692015-07-18 03:55:07 +02001405 tmp_bit_chk <<= shift_ratio;
1406 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001407 }
Marek Vasut55c4d692015-07-18 03:55:07 +02001408
1409 bit_chk &= tmp_bit_chk;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001410 }
1411
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001412 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001413
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001414 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasut55c4d692015-07-18 03:55:07 +02001415
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001416 if (bit_chk != seq->param.read_correct_mask)
Marek Vasut55c4d692015-07-18 03:55:07 +02001417 ret = -EIO;
1418
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001419 debug_cond(DLEVEL >= 1,
Marek Vasut55c4d692015-07-18 03:55:07 +02001420 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1421 __func__, __LINE__, group, bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001422 seq->param.read_correct_mask, ret);
Marek Vasut55c4d692015-07-18 03:55:07 +02001423
1424 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001425}
1426
Marek Vasut6a752782015-07-18 03:34:22 +02001427/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001428 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read
1429 * test
Marek Vasut6a752782015-07-18 03:34:22 +02001430 * @rank_bgn: Rank number
1431 * @all_ranks: Test all ranks
1432 *
1433 * Load up the patterns we are going to use during a read test.
1434 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001435static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq,
1436 const u32 rank_bgn,
Marek Vasut6a752782015-07-18 03:34:22 +02001437 const int all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001438{
Marek Vasut6a752782015-07-18 03:34:22 +02001439 const u32 rank_end = all_ranks ?
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001440 seq->rwcfg->mem_number_of_ranks :
Marek Vasut6a752782015-07-18 03:34:22 +02001441 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1442 u32 r;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001443
1444 debug("%s:%d\n", __func__, __LINE__);
Marek Vasut6a752782015-07-18 03:34:22 +02001445
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001446 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001447 /* set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001448 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001449
		/* Load up a constant burst */
Marek Vasutb5450962015-07-12 21:05:08 +02001451 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001452
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001453 writel(seq->rwcfg->guaranteed_write_wait0,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001454 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001455
Marek Vasutb5450962015-07-12 21:05:08 +02001456 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001457
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001458 writel(seq->rwcfg->guaranteed_write_wait1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001459 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001460
Marek Vasutb5450962015-07-12 21:05:08 +02001461 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001462
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001463 writel(seq->rwcfg->guaranteed_write_wait2,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001464 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001465
Marek Vasutb5450962015-07-12 21:05:08 +02001466 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001467
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001468 writel(seq->rwcfg->guaranteed_write_wait3,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001469 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001470
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001471 writel(seq->rwcfg->guaranteed_write,
1472 SDR_PHYGRP_RWMGRGRP_ADDRESS |
1473 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001474 }
1475
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001476 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001477}
1478
Marek Vasut656002e2015-07-20 03:26:05 +02001479/**
1480 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1481 * @rank_bgn: Rank number
1482 * @group: Read/Write group
1483 * @num_tries: Number of retries of the test
1484 * @all_correct: All bits must be correct in the mask
1485 * @bit_chk: Resulting bit mask after the test
1486 * @all_groups: Test all R/W groups
1487 * @all_ranks: Test all ranks
1488 *
 * Try a read and see if it returns correct data back. The test has dummy
 * reads inserted into the mix, used to align DQS enable, and has more
 * thorough checks than the regular read test.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001492 */
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001493static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001494rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq,
1495 const u32 rank_bgn, const u32 group,
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001496 const u32 num_tries, const u32 all_correct,
1497 u32 *bit_chk,
1498 const u32 all_groups, const u32 all_ranks)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001499{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001500 const u32 rank_end = all_ranks ? seq->rwcfg->mem_number_of_ranks :
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001501 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001502 const u32 quick_read_mode =
1503 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001504 seq->misccfg->enable_super_quick_calibration);
1505 u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001506 u32 tmp_bit_chk;
1507 u32 base_rw_mgr;
1508 u32 addr;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001509
Marek Vasutc6c1fe72015-07-19 07:48:58 +02001510 int r, vg, ret;
Marek Vasuta005c772015-07-19 07:44:21 +02001511
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001512 *bit_chk = seq->param.read_correct_mask;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001513
1514 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001515 /* set rank */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001516 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001517
Marek Vasutb5450962015-07-12 21:05:08 +02001518 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001519
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001520 writel(seq->rwcfg->read_b2b_wait1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001521 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001522
Marek Vasutb5450962015-07-12 21:05:08 +02001523 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001524 writel(seq->rwcfg->read_b2b_wait2,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001525 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001526
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001527 if (quick_read_mode)
Marek Vasutb5450962015-07-12 21:05:08 +02001528 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001529 /* need at least two (1+1) reads to capture failures */
1530 else if (all_groups)
Marek Vasutb5450962015-07-12 21:05:08 +02001531 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001532 else
Marek Vasutb5450962015-07-12 21:05:08 +02001533 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001534
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001535 writel(seq->rwcfg->read_b2b,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001536 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001537 if (all_groups)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001538 writel(seq->rwcfg->mem_if_read_dqs_width *
1539 seq->rwcfg->mem_virtual_groups_per_read_dqs - 1,
Marek Vasutb5450962015-07-12 21:05:08 +02001540 &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001541 else
Marek Vasutb5450962015-07-12 21:05:08 +02001542 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001543
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001544 writel(seq->rwcfg->read_b2b,
Marek Vasutc85b9b32015-08-02 19:47:01 +02001545 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001546
1547 tmp_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001548 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
1549 vg >= 0; vg--) {
Marek Vasut50a780f2015-07-19 07:57:28 +02001550 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02001551 writel(0, &phy_mgr_cmd->fifo_reset);
1552 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1553 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001554
Marek Vasut50a780f2015-07-19 07:57:28 +02001555 if (all_groups) {
1556 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1557 RW_MGR_RUN_ALL_GROUPS_OFFSET;
1558 } else {
1559 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1560 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1561 }
Marek Vasuta3340102015-07-12 19:03:33 +02001562
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001563 writel(seq->rwcfg->read_b2b, addr +
Marek Vasutc85b9b32015-08-02 19:47:01 +02001564 ((group *
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001565 seq->rwcfg->mem_virtual_groups_per_read_dqs +
Marek Vasutc85b9b32015-08-02 19:47:01 +02001566 vg) << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001567
Marek Vasutb5450962015-07-12 21:05:08 +02001568 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001569 tmp_bit_chk <<=
1570 seq->rwcfg->mem_dq_per_read_dqs /
1571 seq->rwcfg->mem_virtual_groups_per_read_dqs;
Marek Vasut50a780f2015-07-19 07:57:28 +02001572 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001573 }
Marek Vasut28957f32015-07-19 07:51:17 +02001574
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001575 *bit_chk &= tmp_bit_chk;
1576 }
1577
Marek Vasuta3340102015-07-12 19:03:33 +02001578 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001579 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001580
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001581 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasuta005c772015-07-19 07:44:21 +02001582
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001583 if (all_correct) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001584 ret = (*bit_chk == seq->param.read_correct_mask);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001585 debug_cond(DLEVEL >= 2,
Marek Vasuta005c772015-07-19 07:44:21 +02001586 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1587 __func__, __LINE__, group, all_groups, *bit_chk,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001588 seq->param.read_correct_mask, ret);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001589 } else {
Marek Vasuta005c772015-07-19 07:44:21 +02001590 ret = (*bit_chk != 0x00);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001591 debug_cond(DLEVEL >= 2,
Marek Vasuta005c772015-07-19 07:44:21 +02001592 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1593 __func__, __LINE__, group, all_groups, *bit_chk,
1594 0, ret);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001595 }
Marek Vasuta005c772015-07-19 07:44:21 +02001596
1597 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001598}
1599
Marek Vasuta50d5d72015-07-19 07:35:36 +02001600/**
1601 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1602 * @grp: Read/Write group
1603 * @num_tries: Number of retries of the test
1604 * @all_correct: All bits must be correct in the mask
1605 * @all_groups: Test all R/W groups
1606 *
1607 * Perform a READ test across all memory ranks.
1608 */
1609static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001610rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq,
1611 const u32 grp, const u32 num_tries,
Marek Vasuta50d5d72015-07-19 07:35:36 +02001612 const u32 all_correct,
1613 const u32 all_groups)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001614{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001615 u32 bit_chk;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001616 return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries,
1617 all_correct, &bit_chk, all_groups,
1618 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001619}
1620
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001621/**
1622 * rw_mgr_incr_vfifo() - Increase VFIFO value
1623 * @grp: Read/Write group
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001624 *
1625 * Increase VFIFO value.
1626 */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001627static void rw_mgr_incr_vfifo(const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001628{
Marek Vasutb5450962015-07-12 21:05:08 +02001629 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001630}
1631
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001632/**
1633 * rw_mgr_decr_vfifo() - Decrease VFIFO value
1634 * @grp: Read/Write group
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001635 *
1636 * Decrease VFIFO value.
1637 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001638static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001639{
Marek Vasut1c9f25b2015-07-19 06:25:27 +02001640 u32 i;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001641
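	/*
	 * The VFIFO pointer can only be advanced, so stepping it back by
	 * one is done by wrapping around: increment it
	 * read_valid_fifo_size - 1 times.
	 */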
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001642 for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++)
Marek Vasut42e43ab2015-07-19 06:37:51 +02001643 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001644}
1645
Marek Vasut088eb212015-07-19 06:45:43 +02001646/**
1647 * find_vfifo_failing_read() - Push VFIFO to get a failing read
1648 * @grp: Read/Write group
1649 *
1650 * Push VFIFO until a failing read happens.
1651 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001652static int find_vfifo_failing_read(struct socfpga_sdrseq *seq,
1653 const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001654{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001655 u32 v, ret, fail_cnt = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001656
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001657 for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001658 debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001659 __func__, __LINE__, v);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001660 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1661 PASS_ONE_BIT, 0);
Marek Vasut088eb212015-07-19 06:45:43 +02001662 if (!ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001663 fail_cnt++;
1664
1665 if (fail_cnt == 2)
Marek Vasut088eb212015-07-19 06:45:43 +02001666 return v;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001667 }
1668
Marek Vasut088eb212015-07-19 06:45:43 +02001669 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001670 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001671 }
1672
Marek Vasut088eb212015-07-19 06:45:43 +02001673 /* No failing read found! Something must have gone wrong. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001674 debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
Marek Vasut088eb212015-07-19 06:45:43 +02001675 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001676}
1677
Marek Vasutf2b02d42015-07-19 05:26:49 +02001678/**
Marek Vasut6ff36b72015-07-19 07:27:06 +02001679 * sdr_find_phase_delay() - Find DQS enable phase or delay
1680 * @working: If 1, look for working phase/delay, if 0, look for non-working
1681 * @delay: If 1, look for delay, if 0, look for phase
1682 * @grp: Read/Write group
1683 * @work: Working window position
1684 * @work_inc: Working window increment
1685 * @pd: DQS Phase/Delay Iterator
1686 *
1687 * Find working or non-working DQS enable phase setting.
1688 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001689static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working,
1690 int delay, const u32 grp, u32 *work,
1691 const u32 work_inc, u32 *pd)
Marek Vasut6ff36b72015-07-19 07:27:06 +02001692{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001693 const u32 max = delay ? seq->iocfg->dqs_en_delay_max :
1694 seq->iocfg->dqs_en_phase_max;
Marek Vasuta50d5d72015-07-19 07:35:36 +02001695 u32 ret;
Marek Vasut6ff36b72015-07-19 07:27:06 +02001696
1697 for (; *pd <= max; (*pd)++) {
1698 if (delay)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001699 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001700 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001701 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001702
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001703 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1704 PASS_ONE_BIT, 0);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001705 if (!working)
1706 ret = !ret;
1707
1708 if (ret)
1709 return 0;
1710
1711 if (work)
1712 *work += work_inc;
1713 }
1714
1715 return -EINVAL;
}

/**
Marek Vasutf2b02d42015-07-19 05:26:49 +02001718 * sdr_find_phase() - Find DQS enable phase
1719 * @working: If 1, look for working phase, if 0, look for non-working phase
1720 * @grp: Read/Write group
Marek Vasutf2b02d42015-07-19 05:26:49 +02001721 * @work: Working window position
1722 * @i: Iterator
1723 * @p: DQS Phase Iterator
Marek Vasutf2b02d42015-07-19 05:26:49 +02001724 *
1725 * Find working or non-working DQS enable phase setting.
1726 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001727static int sdr_find_phase(struct socfpga_sdrseq *seq, int working,
1728 const u32 grp, u32 *work, u32 *i, u32 *p)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001729{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001730 const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001731 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001732
Marek Vasutf2b02d42015-07-19 05:26:49 +02001733 for (; *i < end; (*i)++) {
1734 if (working)
1735 *p = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001736
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001737 ret = sdr_find_phase_delay(seq, working, 0, grp, work,
1738 seq->iocfg->delay_per_opa_tap, p);
Marek Vasut6ff36b72015-07-19 07:27:06 +02001739 if (!ret)
1740 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001741
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001742 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasutf2b02d42015-07-19 05:26:49 +02001743 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001744 rw_mgr_incr_vfifo(grp);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001745 if (!working)
1746 *p = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001747 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001748 }
1749
Marek Vasutf2b02d42015-07-19 05:26:49 +02001750 return -EINVAL;
1751}
1752
Marek Vasut6394ef52015-07-19 06:04:00 +02001753/**
1754 * sdr_working_phase() - Find working DQS enable phase
1755 * @grp: Read/Write group
1756 * @work_bgn: Working window start position
Marek Vasut6394ef52015-07-19 06:04:00 +02001757 * @d: dtaps output value
1758 * @p: DQS Phase Iterator
1759 * @i: Iterator
1760 *
1761 * Find working DQS enable phase setting.
1762 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001763static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp,
1764 u32 *work_bgn, u32 *d, u32 *p, u32 *i)
Marek Vasutf2b02d42015-07-19 05:26:49 +02001765{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001766 const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1767 seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutf2b02d42015-07-19 05:26:49 +02001768 int ret;
1769
1770 *work_bgn = 0;
1771
1772 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1773 *i = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001774 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d);
1775 ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001776 if (!ret)
1777 return 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001778 *work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutf2b02d42015-07-19 05:26:49 +02001779 }
1780
Marek Vasutb148ebe2015-07-19 05:01:12 +02001781 /* Cannot find working solution */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001782 debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
Marek Vasutf2b02d42015-07-19 05:26:49 +02001783 __func__, __LINE__);
1784 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001785}
1786
Marek Vasut6394ef52015-07-19 06:04:00 +02001787/**
1788 * sdr_backup_phase() - Find DQS enable backup phase
1789 * @grp: Read/Write group
1790 * @work_bgn: Working window start position
Marek Vasut6394ef52015-07-19 06:04:00 +02001791 * @p: DQS Phase Iterator
1792 *
1793 * Find DQS enable backup phase setting.
1794 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001795static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp,
1796 u32 *work_bgn, u32 *p)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001797{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001798 u32 tmp_delay, d;
Marek Vasut6394ef52015-07-19 06:04:00 +02001799 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001800
1801 /* Special case code for backing up a phase */
1802 if (*p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001803 *p = seq->iocfg->dqs_en_phase_max;
1804 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001805 } else {
1806 (*p)--;
1807 }
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001808 tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap;
1809 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001810
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001811 for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
Marek Vasutc85b9b32015-08-02 19:47:01 +02001812 d++) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001813 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001814
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001815 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1816 PASS_ONE_BIT, 0);
Marek Vasut6394ef52015-07-19 06:04:00 +02001817 if (ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001818 *work_bgn = tmp_delay;
1819 break;
1820 }
Marek Vasut6eff8032015-07-19 05:48:30 +02001821
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001822 tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001823 }
1824
Marek Vasut6394ef52015-07-19 06:04:00 +02001825 /* Restore VFIFO to old state before we decremented it (if needed). */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001826 (*p)++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001827 if (*p > seq->iocfg->dqs_en_phase_max) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001828 *p = 0;
Marek Vasut42e43ab2015-07-19 06:37:51 +02001829 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001830 }
1831
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001832 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001833}
1834
Marek Vasut6394ef52015-07-19 06:04:00 +02001835/**
1836 * sdr_nonworking_phase() - Find non-working DQS enable phase
1837 * @grp: Read/Write group
1838 * @work_end: Working window end position
Marek Vasut6394ef52015-07-19 06:04:00 +02001839 * @p: DQS Phase Iterator
1840 * @i: Iterator
1841 *
1842 * Find non-working DQS enable phase setting.
1843 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001844static int sdr_nonworking_phase(struct socfpga_sdrseq *seq,
1845 const u32 grp, u32 *work_end, u32 *p, u32 *i)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001846{
Marek Vasutf2b02d42015-07-19 05:26:49 +02001847 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001848
1849 (*p)++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001850 *work_end += seq->iocfg->delay_per_opa_tap;
1851 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasutf2b02d42015-07-19 05:26:49 +02001852 /* Fiddle with FIFO. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001853 *p = 0;
Marek Vasut42e43ab2015-07-19 06:37:51 +02001854 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001855 }
1856
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001857 ret = sdr_find_phase(seq, 0, grp, work_end, i, p);
Marek Vasutf2b02d42015-07-19 05:26:49 +02001858 if (ret) {
1859 /* Cannot see edge of failing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001860 debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n",
Marek Vasutf2b02d42015-07-19 05:26:49 +02001861 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001862 }
1863
Marek Vasutf2b02d42015-07-19 05:26:49 +02001864 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001865}
1866
Marek Vasutfea03c32015-07-19 04:14:32 +02001867/**
1868 * sdr_find_window_center() - Find center of the working DQS window.
1869 * @grp: Read/Write group
1870 * @work_bgn: First working settings
1871 * @work_end: Last working settings
Marek Vasutfea03c32015-07-19 04:14:32 +02001872 *
1873 * Find center of the working DQS enable window.
1874 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001875static int sdr_find_window_center(struct socfpga_sdrseq *seq,
1876 const u32 grp, const u32 work_bgn,
Marek Vasut42e43ab2015-07-19 06:37:51 +02001877 const u32 work_end)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001878{
Marek Vasuta50d5d72015-07-19 07:35:36 +02001879 u32 work_mid;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001880 int tmp_delay = 0;
Marek Vasutd996e802015-07-19 02:56:59 +02001881 int i, p, d;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001882
Marek Vasutd996e802015-07-19 02:56:59 +02001883 work_mid = (work_bgn + work_end) / 2;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001884
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001885 debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
Marek Vasutd996e802015-07-19 02:56:59 +02001886 work_bgn, work_end, work_mid);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001887 /* Get the middle delay to be less than a VFIFO delay */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001888 tmp_delay = (seq->iocfg->dqs_en_phase_max + 1)
1889 * seq->iocfg->delay_per_opa_tap;
Marek Vasutd996e802015-07-19 02:56:59 +02001890
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001891 debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001892 work_mid %= tmp_delay;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001893 debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001894
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001895 tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap);
1896 if (tmp_delay > seq->iocfg->dqs_en_phase_max
1897 * seq->iocfg->delay_per_opa_tap) {
1898 tmp_delay = seq->iocfg->dqs_en_phase_max
1899 * seq->iocfg->delay_per_opa_tap;
1900 }
1901 p = tmp_delay / seq->iocfg->delay_per_opa_tap;
Marek Vasutd996e802015-07-19 02:56:59 +02001902
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001903 debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001904
Marek Vasutc85b9b32015-08-02 19:47:01 +02001905 d = DIV_ROUND_UP(work_mid - tmp_delay,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001906 seq->iocfg->delay_per_dqs_en_dchain_tap);
1907 if (d > seq->iocfg->dqs_en_delay_max)
1908 d = seq->iocfg->dqs_en_delay_max;
1909 tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutea4c4bb2015-07-19 04:04:33 +02001910
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001911 debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
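	/*
	 * Worked example with hypothetical taps (delay_per_opa_tap = 1000,
	 * delay_per_dqs_en_dchain_tap = 250, work_mid = 2600 after the
	 * modulo): p = 2600 / 1000 = 2, d = DIV_ROUND_UP(600, 250) = 3,
	 * i.e. the centre is approximated as 2 ptaps + 3 dtaps = 2750.
	 */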
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001912
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001913 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
1914 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001915
1916 /*
	 * Push the VFIFO until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
1919 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001920 for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001921 debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001922 if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001923 PASS_ONE_BIT,
Marek Vasuta50d5d72015-07-19 07:35:36 +02001924 0)) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001925 debug_cond(DLEVEL >= 2,
Marek Vasut42e43ab2015-07-19 06:37:51 +02001926 "%s:%d center: found: ptap=%u dtap=%u\n",
1927 __func__, __LINE__, p, d);
Marek Vasutfea03c32015-07-19 04:14:32 +02001928 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001929 }
1930
Marek Vasutfea03c32015-07-19 04:14:32 +02001931 /* Fiddle with FIFO. */
Marek Vasut42e43ab2015-07-19 06:37:51 +02001932 rw_mgr_incr_vfifo(grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001933 }
1934
Marek Vasut4df2d7b2016-04-04 21:21:05 +02001935 debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
Marek Vasutfea03c32015-07-19 04:14:32 +02001936 __func__, __LINE__);
1937 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001938}
1939
Marek Vasutec4bbd32015-07-20 09:11:09 +02001940/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001941 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to
1942 * use
Marek Vasutec4bbd32015-07-20 09:11:09 +02001943 * @grp: Read/Write Group
1944 *
1945 * Find a good DQS enable to use.
1946 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001947static int
1948rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq,
1949 const u32 grp)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001950{
Marek Vasut59729a62015-07-20 09:20:20 +02001951 u32 d, p, i;
1952 u32 dtaps_per_ptap;
1953 u32 work_bgn, work_end;
Marek Vasuteb447cb2015-08-10 23:01:43 +02001954 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
Marek Vasut59729a62015-07-20 09:20:20 +02001955 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001956
1957 debug("%s:%d %u\n", __func__, __LINE__, grp);
1958
1959 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1960
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001961 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
1962 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001963
Marek Vasut4896bcc2015-07-19 02:42:21 +02001964 /* Step 0: Determine number of delay taps for each phase tap. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001965 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1966 seq->iocfg->delay_per_dqs_en_dchain_tap;
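	/*
	 * E.g. with hypothetical values delay_per_opa_tap = 1000 ps and
	 * delay_per_dqs_en_dchain_tap = 250 ps, one phase tap spans four
	 * delay taps, so dtaps_per_ptap = 4.
	 */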
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001967
Marek Vasut4896bcc2015-07-19 02:42:21 +02001968 /* Step 1: First push vfifo until we get a failing read. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001969 find_vfifo_failing_read(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001970
Marek Vasut4896bcc2015-07-19 02:42:21 +02001971 /* Step 2: Find first working phase, increment in ptaps. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001972 work_bgn = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001973 ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i);
Marek Vasut28dbf122015-07-20 09:20:42 +02001974 if (ret)
1975 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001976
1977 work_end = work_bgn;
1978
1979 /*
Marek Vasut4896bcc2015-07-19 02:42:21 +02001980 * If d is 0 then the working window covers a phase tap and we can
1981 * follow the old procedure. Otherwise, we've found the beginning
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001982 * and we need to increment the dtaps until we find the end.
1983 */
1984 if (d == 0) {
Marek Vasut4896bcc2015-07-19 02:42:21 +02001985 /*
1986 * Step 3a: If we have room, back off by one and
1987 * increment in dtaps.
1988 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001989 sdr_backup_phase(seq, grp, &work_bgn, &p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001990
Marek Vasut4896bcc2015-07-19 02:42:21 +02001991 /*
1992 * Step 4a: go forward from working phase to non working
1993 * phase, increment in ptaps.
1994 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02001995 ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i);
Marek Vasut28dbf122015-07-20 09:20:42 +02001996 if (ret)
1997 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05001998
Marek Vasut4896bcc2015-07-19 02:42:21 +02001999 /* Step 5a: Back off one from last, increment in dtaps. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002000
2001 /* Special case code for backing up a phase */
2002 if (p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002003 p = seq->iocfg->dqs_en_phase_max;
2004 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002005 } else {
2006 p = p - 1;
2007 }
2008
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002009 work_end -= seq->iocfg->delay_per_opa_tap;
2010 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002011
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002012 d = 0;
2013
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002014 debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002015 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002016 }
2017
Marek Vasut4896bcc2015-07-19 02:42:21 +02002018 /* The dtap increment to find the failing edge is done here. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002019 sdr_find_phase_delay(seq, 0, 1, grp, &work_end,
2020 seq->iocfg->delay_per_dqs_en_dchain_tap, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002021
2022 /* Go back to working dtap */
2023 if (d != 0)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002024 work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002025
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002026 debug_cond(DLEVEL >= 2,
Marek Vasut4896bcc2015-07-19 02:42:21 +02002027 "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
2028 __func__, __LINE__, p, d - 1, work_end);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002029
2030 if (work_end < work_bgn) {
2031 /* nil range */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002032 debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002033 __func__, __LINE__);
Marek Vasut28dbf122015-07-20 09:20:42 +02002034 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002035 }
2036
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002037 debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002038 __func__, __LINE__, work_bgn, work_end);
2039
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002040 /*
Marek Vasut4896bcc2015-07-19 02:42:21 +02002041 * We need to calculate the number of dtaps that equal a ptap.
2042 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002044 */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002045 debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002046 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002047
2048 /* Special case code for backing up a phase */
2049 if (p == 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002050 p = seq->iocfg->dqs_en_phase_max;
2051 rw_mgr_decr_vfifo(seq, grp);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002052 debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002053 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002054 } else {
2055 p = p - 1;
		debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002057 __func__, __LINE__, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002058 }
2059
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002060 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002061
2062 /*
2063 * Increase dtap until we first see a passing read (in case the
Marek Vasut4896bcc2015-07-19 02:42:21 +02002064 * window is smaller than a ptap), and then a failing read to
2065 * mark the edge of the window again.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002066 */
2067
Marek Vasut4896bcc2015-07-19 02:42:21 +02002068 /* Find a passing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002069 debug_cond(DLEVEL >= 2, "%s:%d find passing read\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002070 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002071
Marek Vasut6ff36b72015-07-19 07:27:06 +02002072 initial_failing_dtap = d;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002073
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002074 found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002075 if (found_passing_read) {
Marek Vasut4896bcc2015-07-19 02:42:21 +02002076 /* Find a failing read. */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002077 debug_cond(DLEVEL >= 2, "%s:%d find failing read\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002078 __func__, __LINE__);
Marek Vasut6ff36b72015-07-19 07:27:06 +02002079 d++;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002080 found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL,
2081 0, &d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002082 } else {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002083 debug_cond(DLEVEL >= 1,
Marek Vasut4896bcc2015-07-19 02:42:21 +02002084 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
2085 __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002086 }
2087
	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found both a passing and a failing read. If we didn't, it means
	 * d hit the max (seq->iocfg->dqs_en_delay_max) and dtaps_per_ptap
	 * retains its statically calculated value.
	 */
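	/*
	 * E.g. (hypothetical values) if the initial failing edge was at
	 * dtap 3 and, after backing up one ptap, the failing edge is
	 * re-found at dtap 7, then one ptap spans 7 - 3 = 4 dtaps.
	 */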
2094 if (found_passing_read && found_failing_read)
2095 dtaps_per_ptap = d - initial_failing_dtap;
2096
Marek Vasutb5450962015-07-12 21:05:08 +02002097 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
Marek Vasut4896bcc2015-07-19 02:42:21 +02002099 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002100
Marek Vasut4896bcc2015-07-19 02:42:21 +02002101 /* Step 6: Find the centre of the window. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002102 ret = sdr_find_window_center(seq, grp, work_bgn, work_end);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002103
Marek Vasut28dbf122015-07-20 09:20:42 +02002104 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002105}
2106
Marek Vasutb20a5062015-07-13 02:11:02 +02002107/**
Marek Vasut85cd4d72015-07-13 02:48:34 +02002108 * search_stop_check() - Check if the detected edge is valid
2109 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2110 * @d: DQS delay
2111 * @rank_bgn: Rank number
2112 * @write_group: Write Group
2113 * @read_group: Read Group
2114 * @bit_chk: Resulting bit mask after the test
2115 * @sticky_bit_chk: Resulting sticky bit mask after the test
2116 * @use_read_test: Perform read test
2117 *
2118 * Test if the found edge is valid.
2119 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002120static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write,
2121 const int d, const int rank_bgn,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002122 const u32 write_group, const u32 read_group,
2123 u32 *bit_chk, u32 *sticky_bit_chk,
2124 const u32 use_read_test)
2125{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002126 const u32 ratio = seq->rwcfg->mem_if_read_dqs_width /
2127 seq->rwcfg->mem_if_write_dqs_width;
2128 const u32 correct_mask = write ? seq->param.write_correct_mask :
2129 seq->param.read_correct_mask;
2130 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2131 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut85cd4d72015-07-13 02:48:34 +02002132 u32 ret;
2133 /*
2134 * Stop searching when the read test doesn't pass AND when
2135 * we've seen a passing read on every bit.
2136 */
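	/*
	 * Hypothetical 8-bit example: the edge is accepted once every DQ
	 * bit has passed at least once (*sticky_bit_chk == 0xFF) while
	 * the current test at delay d fails.
	 */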
2137 if (write) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002138 ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn,
2139 write_group, 0,
2140 PASS_ONE_BIT, bit_chk,
2141 0);
Marek Vasut85cd4d72015-07-13 02:48:34 +02002142 } else if (use_read_test) { /* READ-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002143 ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002144 NUM_READ_PB_TESTS,
2145 PASS_ONE_BIT, bit_chk,
2146 0, 0);
2147 } else { /* READ-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002148 rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002149 PASS_ONE_BIT, bit_chk, 0);
2150 *bit_chk = *bit_chk >> (per_dqs *
2151 (read_group - (write_group * ratio)));
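		/*
		 * Note: the write test returns results for the whole
		 * write group, so the bits of the read group under test
		 * are shifted down to bit 0 ('ratio' read groups map to
		 * one write group).
		 */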
2152 ret = (*bit_chk == 0);
2153 }
2154 *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2155 ret = ret && (*sticky_bit_chk == correct_mask);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002156 debug_cond(DLEVEL >= 2,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002157 "%s:%d center(left): dtap=%u => %u == %u && %u",
2158 __func__, __LINE__, d,
2159 *sticky_bit_chk, correct_mask, ret);
2160 return ret;
2161}
2162
2163/**
Marek Vasute624caf2015-07-13 02:38:15 +02002164 * search_left_edge() - Find left edge of DQ/DQS working phase
2165 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2166 * @rank_bgn: Rank number
2167 * @write_group: Write Group
2168 * @read_group: Read Group
2169 * @test_bgn: Rank number to begin the test
Marek Vasute624caf2015-07-13 02:38:15 +02002170 * @sticky_bit_chk: Resulting sticky bit mask after the test
2171 * @left_edge: Left edge of the DQ/DQS phase
2172 * @right_edge: Right edge of the DQ/DQS phase
2173 * @use_read_test: Perform read test
2174 *
2175 * Find left edge of DQ/DQS working phase.
2176 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002177static void search_left_edge(struct socfpga_sdrseq *seq, const int write,
2178 const int rank_bgn, const u32 write_group,
2179 const u32 read_group, const u32 test_bgn,
2180 u32 *sticky_bit_chk, int *left_edge,
2181 int *right_edge, const u32 use_read_test)
Marek Vasute624caf2015-07-13 02:38:15 +02002182{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002183 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2184 seq->iocfg->io_in_delay_max;
2185 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2186 seq->iocfg->dqs_in_delay_max;
2187 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2188 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutb69c2472015-07-18 20:34:00 +02002189 u32 stop, bit_chk;
Marek Vasute624caf2015-07-13 02:38:15 +02002190 int i, d;
2191
2192 for (d = 0; d <= dqs_max; d++) {
2193 if (write)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002194 scc_mgr_apply_group_dq_out1_delay(seq, d);
Marek Vasute624caf2015-07-13 02:38:15 +02002195 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002196 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d);
Marek Vasute624caf2015-07-13 02:38:15 +02002197
2198 writel(0, &sdr_scc_mgr->update);
2199
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002200 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasutb69c2472015-07-18 20:34:00 +02002201 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002202 use_read_test);
Marek Vasute624caf2015-07-13 02:38:15 +02002203 if (stop == 1)
2204 break;
2205
2206 /* stop != 1 */
2207 for (i = 0; i < per_dqs; i++) {
Marek Vasutb69c2472015-07-18 20:34:00 +02002208 if (bit_chk & 1) {
Marek Vasute624caf2015-07-13 02:38:15 +02002209 /*
2210 * Remember a passing test as
2211 * the left_edge.
2212 */
2213 left_edge[i] = d;
2214 } else {
2215 /*
2216 * If a left edge has not been seen
2217 * yet, then a future passing test
2218 * will mark this edge as the right
2219 * edge.
2220 */
2221 if (left_edge[i] == delay_max + 1)
2222 right_edge[i] = -(d + 1);
2223 }
Marek Vasutb69c2472015-07-18 20:34:00 +02002224 bit_chk >>= 1;
Marek Vasute624caf2015-07-13 02:38:15 +02002225 }
2226 }
2227
2228 /* Reset DQ delay chains to 0 */
2229 if (write)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002230 scc_mgr_apply_group_dq_out1_delay(seq, 0);
Marek Vasute624caf2015-07-13 02:38:15 +02002231 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002232 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasute624caf2015-07-13 02:38:15 +02002233
2234 *sticky_bit_chk = 0;
2235 for (i = per_dqs - 1; i >= 0; i--) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002236 debug_cond(DLEVEL >= 2,
Marek Vasute624caf2015-07-13 02:38:15 +02002237 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2238 __func__, __LINE__, i, left_edge[i],
2239 i, right_edge[i]);
2240
2241 /*
2242 * Check for cases where we haven't found the left edge,
2243			 * which makes our assignment of the right edge invalid.
2244 * Reset it to the illegal value.
2245 */
2246 if ((left_edge[i] == delay_max + 1) &&
2247 (right_edge[i] != delay_max + 1)) {
2248 right_edge[i] = delay_max + 1;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002249 debug_cond(DLEVEL >= 2,
Marek Vasute624caf2015-07-13 02:38:15 +02002250 "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2251 __func__, __LINE__, i, right_edge[i]);
2252 }
2253
2254 /*
2255 * Reset sticky bit
2256 * READ: except for bits where we have seen both
2257 * the left and right edge.
2258 * WRITE: except for bits where we have seen the
2259 * left edge.
2260 */
2261 *sticky_bit_chk <<= 1;
2262 if (write) {
2263 if (left_edge[i] != delay_max + 1)
2264 *sticky_bit_chk |= 1;
2265 } else {
2266 if ((left_edge[i] != delay_max + 1) &&
2267 (right_edge[i] != delay_max + 1))
2268 *sticky_bit_chk |= 1;
2269 }
2270 }
Marek Vasute624caf2015-07-13 02:38:15 +02002271}
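
/*
 * Minimal sketch (illustrative only) of the edge bookkeeping used in
 * search_left_edge() above. 'not_seen' stands for delay_max + 1. A
 * pass records the current delay as the left edge; a fail while the
 * left edge is still unseen parks the provisional marker -(d + 1) in
 * right_edge, to be overwritten once a real edge is found.
 */
static void example_record_left_edge(int passed, int d, int not_seen,
				     int *left_edge, int *right_edge)
{
	if (passed)
		*left_edge = d;		/* passing test => left edge */
	else if (*left_edge == not_seen)
		*right_edge = -(d + 1);	/* provisional right edge */
}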
2272
2273/**
Marek Vasutb20a5062015-07-13 02:11:02 +02002274 * search_right_edge() - Find right edge of DQ/DQS working phase
2275 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2276 * @rank_bgn: Rank number
2277 * @write_group: Write Group
2278 * @read_group: Read Group
2279 * @start_dqs: DQS start phase
2280 * @start_dqs_en: DQS enable start phase
Marek Vasutb20a5062015-07-13 02:11:02 +02002281 * @sticky_bit_chk: Resulting sticky bit mask after the test
2282 * @left_edge: Left edge of the DQ/DQS phase
2283 * @right_edge: Right edge of the DQ/DQS phase
2284 * @use_read_test: Perform read test
2285 *
2286 * Find right edge of DQ/DQS working phase.
2287 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002288static int search_right_edge(struct socfpga_sdrseq *seq, const int write,
2289 const int rank_bgn, const u32 write_group,
2290 const u32 read_group, const int start_dqs,
2291 const int start_dqs_en, u32 *sticky_bit_chk,
2292 int *left_edge, int *right_edge,
2293 const u32 use_read_test)
Marek Vasutb20a5062015-07-13 02:11:02 +02002294{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002295 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2296 seq->iocfg->io_in_delay_max;
2297 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2298 seq->iocfg->dqs_in_delay_max;
2299 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2300 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutb69c2472015-07-18 20:34:00 +02002301 u32 stop, bit_chk;
Marek Vasutb20a5062015-07-13 02:11:02 +02002302 int i, d;
2303
2304 for (d = 0; d <= dqs_max - start_dqs; d++) {
2305 if (write) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002306 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
2307 write_group,
Marek Vasutb20a5062015-07-13 02:11:02 +02002308 d + start_dqs);
2309 } else { /* READ-ONLY */
2310 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002311 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasut8af9ca02015-08-02 19:42:26 +02002312 u32 delay = d + start_dqs_en;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002313 if (delay > seq->iocfg->dqs_en_delay_max)
2314 delay = seq->iocfg->dqs_en_delay_max;
Marek Vasutb20a5062015-07-13 02:11:02 +02002315 scc_mgr_set_dqs_en_delay(read_group, delay);
2316 }
2317 scc_mgr_load_dqs(read_group);
2318 }
2319
2320 writel(0, &sdr_scc_mgr->update);
2321
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002322 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasutb69c2472015-07-18 20:34:00 +02002323 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut85cd4d72015-07-13 02:48:34 +02002324 use_read_test);
Marek Vasutb20a5062015-07-13 02:11:02 +02002325 if (stop == 1) {
2326 if (write && (d == 0)) { /* WRITE-ONLY */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002327 for (i = 0;
2328 i < seq->rwcfg->mem_dq_per_write_dqs;
Marek Vasutc85b9b32015-08-02 19:47:01 +02002329 i++) {
Marek Vasutb20a5062015-07-13 02:11:02 +02002330 /*
2331 * d = 0 failed, but it passed when
2332 * testing the left edge, so it must be
2333 * marginal, set it to -1
2334 */
2335 if (right_edge[i] == delay_max + 1 &&
2336 left_edge[i] != delay_max + 1)
2337 right_edge[i] = -1;
2338 }
2339 }
2340 break;
2341 }
2342
2343 /* stop != 1 */
2344 for (i = 0; i < per_dqs; i++) {
Marek Vasutb69c2472015-07-18 20:34:00 +02002345 if (bit_chk & 1) {
Marek Vasutb20a5062015-07-13 02:11:02 +02002346 /*
2347 * Remember a passing test as
2348 * the right_edge.
2349 */
2350 right_edge[i] = d;
2351 } else {
2352 if (d != 0) {
2353 /*
2354 * If a right edge has not
2355 * been seen yet, then a future
2356 * passing test will mark this
2357 * edge as the left edge.
2358 */
2359 if (right_edge[i] == delay_max + 1)
2360 left_edge[i] = -(d + 1);
2361 } else {
2362 /*
2363 * d = 0 failed, but it passed
2364 * when testing the left edge,
2365 * so it must be marginal, set
2366 * it to -1
2367 */
2368 if (right_edge[i] == delay_max + 1 &&
2369 left_edge[i] != delay_max + 1)
2370 right_edge[i] = -1;
2371 /*
2372 * If a right edge has not been
2373 * seen yet, then a future
2374 * passing test will mark this
2375 * edge as the left edge.
2376 */
2377 else if (right_edge[i] == delay_max + 1)
2378 left_edge[i] = -(d + 1);
2379 }
2380 }
2381
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002382 debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ",
Marek Vasutb20a5062015-07-13 02:11:02 +02002383 __func__, __LINE__, d);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002384 debug_cond(DLEVEL >= 2,
Marek Vasutb20a5062015-07-13 02:11:02 +02002385 "bit_chk_test=%i left_edge[%u]: %d ",
Marek Vasutb69c2472015-07-18 20:34:00 +02002386 bit_chk & 1, i, left_edge[i]);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002387 debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i,
Marek Vasutb20a5062015-07-13 02:11:02 +02002388 right_edge[i]);
Marek Vasutb69c2472015-07-18 20:34:00 +02002389 bit_chk >>= 1;
Marek Vasutb20a5062015-07-13 02:11:02 +02002390 }
2391 }
2392
2393 /* Check that all bits have a window */
2394 for (i = 0; i < per_dqs; i++) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002395 debug_cond(DLEVEL >= 2,
Marek Vasutb20a5062015-07-13 02:11:02 +02002396 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2397 __func__, __LINE__, i, left_edge[i],
2398 i, right_edge[i]);
2399 if ((left_edge[i] == dqs_max + 1) ||
2400 (right_edge[i] == dqs_max + 1))
2401 return i + 1; /* FIXME: If we fail, retval > 0 */
2402 }
2403
2404 return 0;
2405}
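
/*
 * Sketch of the d = 0 marginal rule above (illustrative only): a bit
 * that fails at the very first right-edge step, but passed during the
 * left-edge search, gets its right edge pinned to -1 instead of the
 * "not seen" sentinel, marking that bit's window as marginal.
 */
static void example_mark_marginal(int not_seen, const int *left_edge,
				  int *right_edge)
{
	if (*right_edge == not_seen && *left_edge != not_seen)
		*right_edge = -1;
}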
2406
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002407/**
2408 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2409 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2410 * @left_edge: Left edge of the DQ/DQS phase
2411 * @right_edge: Right edge of the DQ/DQS phase
2412 * @mid_min: Best DQ/DQS phase middle setting
2413 *
2414 * Find index and value of the middle of the DQ/DQS working phase.
2415 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002416static int get_window_mid_index(struct socfpga_sdrseq *seq,
2417 const int write, int *left_edge,
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002418 int *right_edge, int *mid_min)
2419{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002420 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2421 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002422 int i, mid, min_index;
2423
2424 /* Find middle of window for each DQ bit */
2425 *mid_min = left_edge[0] - right_edge[0];
2426 min_index = 0;
2427 for (i = 1; i < per_dqs; i++) {
2428 mid = left_edge[i] - right_edge[i];
2429 if (mid < *mid_min) {
2430 *mid_min = mid;
2431 min_index = i;
2432 }
2433 }
2434
2435 /*
2436 * -mid_min/2 represents the amount that we need to move DQS.
2437 * If mid_min is odd and positive we'll need to add one to make
2438 * sure the rounding in further calculations is correct (always
2439 * bias to the right), so just add 1 for all positive values.
2440 */
2441 if (*mid_min > 0)
2442 (*mid_min)++;
2443 *mid_min = *mid_min / 2;
2444
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002445 debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
Marek Vasutaa0e6e12015-07-18 19:18:06 +02002446 __func__, __LINE__, *mid_min, min_index);
2447 return min_index;
2448}
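
/*
 * Worked example with assumed numbers: left_edge = {12, 10} and
 * right_edge = {4, 8} give per-bit window offsets 8 and 2, so
 * min_index = 1 and mid_min = 2. The positive-bias step then yields
 * (2 + 1) / 2 = 1, i.e. DQS moves by one tap for the narrowest bit.
 */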
2449
Marek Vasut89feb502015-07-18 19:46:26 +02002450/**
2451 * center_dq_windows() - Center the DQ/DQS windows
2452 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2453 * @left_edge: Left edge of the DQ/DQS phase
2454 * @right_edge: Right edge of the DQ/DQS phase
2455 * @mid_min: Adjusted DQ/DQS phase middle setting
2456 * @orig_mid_min: Original DQ/DQS phase middle setting
2457 * @min_index: DQ/DQS phase middle setting index
2458 * @test_bgn: Rank number to begin the test
2459 * @dq_margin: Amount of shift for the DQ
2460 * @dqs_margin: Amount of shift for the DQS
2461 *
2462 * Align the DQ/DQS windows in each group.
2463 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002464static void center_dq_windows(struct socfpga_sdrseq *seq,
2465 const int write, int *left_edge, int *right_edge,
Marek Vasut89feb502015-07-18 19:46:26 +02002466 const int mid_min, const int orig_mid_min,
2467 const int min_index, const int test_bgn,
2468 int *dq_margin, int *dqs_margin)
2469{
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002470 const s32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2471 seq->iocfg->io_in_delay_max;
2472 const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2473 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut66acabc2016-04-05 23:17:35 +02002474 const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
Marek Vasut89feb502015-07-18 19:46:26 +02002475 SCC_MGR_IO_IN_DELAY_OFFSET;
Marek Vasut66acabc2016-04-05 23:17:35 +02002476 const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
Marek Vasut89feb502015-07-18 19:46:26 +02002477
Marek Vasut66acabc2016-04-05 23:17:35 +02002478 s32 temp_dq_io_delay1;
Marek Vasut89feb502015-07-18 19:46:26 +02002479 int shift_dq, i, p;
2480
2481 /* Initialize data for export structures */
2482 *dqs_margin = delay_max + 1;
2483 *dq_margin = delay_max + 1;
2484
2485	/* Add delay to bring centre of all DQ windows to the same "level". */
2486 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2487 /* Use values before divide by 2 to reduce round off error */
2488 shift_dq = (left_edge[i] - right_edge[i] -
2489 (left_edge[min_index] - right_edge[min_index]))/2 +
2490 (orig_mid_min - mid_min);
2491
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002492 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002493 "vfifo_center: before: shift_dq[%u]=%d\n",
2494 i, shift_dq);
2495
Marek Vasut66acabc2016-04-05 23:17:35 +02002496 temp_dq_io_delay1 = readl(addr + (i << 2));
Marek Vasut89feb502015-07-18 19:46:26 +02002497
2498 if (shift_dq + temp_dq_io_delay1 > delay_max)
Marek Vasut66acabc2016-04-05 23:17:35 +02002499 shift_dq = delay_max - temp_dq_io_delay1;
Marek Vasut89feb502015-07-18 19:46:26 +02002500 else if (shift_dq + temp_dq_io_delay1 < 0)
2501 shift_dq = -temp_dq_io_delay1;
2502
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002503 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002504 "vfifo_center: after: shift_dq[%u]=%d\n",
2505 i, shift_dq);
2506
2507 if (write)
Marek Vasutc85b9b32015-08-02 19:47:01 +02002508 scc_mgr_set_dq_out1_delay(i,
2509 temp_dq_io_delay1 + shift_dq);
Marek Vasut89feb502015-07-18 19:46:26 +02002510 else
Marek Vasutc85b9b32015-08-02 19:47:01 +02002511 scc_mgr_set_dq_in_delay(p,
2512 temp_dq_io_delay1 + shift_dq);
Marek Vasut89feb502015-07-18 19:46:26 +02002513
2514 scc_mgr_load_dq(p);
2515
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002516 debug_cond(DLEVEL >= 2,
Marek Vasut89feb502015-07-18 19:46:26 +02002517 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2518 left_edge[i] - shift_dq + (-mid_min),
2519 right_edge[i] + shift_dq - (-mid_min));
2520
2521 /* To determine values for export structures */
2522 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2523 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2524
2525 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2526 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2527 }
Marek Vasut89feb502015-07-18 19:46:26 +02002528}
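
/*
 * Sketch of the per-bit clamping above, with assumed values: for
 * delay_max = 31, a computed shift of 10 on top of an existing delay
 * of 25 would overflow the chain, so it is clamped to 31 - 25 = 6; a
 * shift of -7 against a delay of 4 is clamped to -4.
 */
static int example_clamp_shift(int shift_dq, int cur_delay, int delay_max)
{
	if (shift_dq + cur_delay > delay_max)
		return delay_max - cur_delay;	/* clamp to chain maximum */
	if (shift_dq + cur_delay < 0)
		return -cur_delay;		/* clamp to zero delay */
	return shift_dq;
}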
2529
Marek Vasut9cdbb962015-07-21 04:27:32 +02002530/**
2531 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2532 * @rank_bgn: Rank number
2533 * @rw_group: Read/Write Group
2534 * @test_bgn: Rank at which the test begins
2535 * @use_read_test: Perform a read test
2536 * @update_fom: Update FOM
2537 *
2538 * Per-bit deskew DQ and centering.
2539 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002540static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq,
2541 const u32 rank_bgn,
2542 const u32 rw_group,
2543 const u32 test_bgn,
2544 const int use_read_test,
2545 const int update_fom)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002546{
Marek Vasutf1b8f712015-07-18 19:57:12 +02002547 const u32 addr =
2548 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
Marek Vasutdfed1e62015-07-18 20:42:27 +02002549 (rw_group << 2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002550 /*
2551 * Store these as signed since there are comparisons with
2552 * signed numbers.
2553 */
Marek Vasut8af9ca02015-08-02 19:42:26 +02002554 u32 sticky_bit_chk;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002555 s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs];
2556 s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs];
2557 s32 orig_mid_min, mid_min;
2558 s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
2559 s32 dq_margin, dqs_margin;
Marek Vasutf1b8f712015-07-18 19:57:12 +02002560 int i, min_index;
Marek Vasutb20a5062015-07-13 02:11:02 +02002561 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002562
Marek Vasutdfed1e62015-07-18 20:42:27 +02002563 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002564
Marek Vasutf1b8f712015-07-18 19:57:12 +02002565 start_dqs = readl(addr);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002566 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
2567 start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002568
2569 /* set the left and right edge of each bit to an illegal value */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002570 /* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002571 sticky_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002572 for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) {
2573 left_edge[i] = seq->iocfg->io_in_delay_max + 1;
2574 right_edge[i] = seq->iocfg->io_in_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002575 }
2576
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002577 /* Search for the left edge of the window for each bit */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002578 search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn,
Marek Vasutb69c2472015-07-18 20:34:00 +02002579 &sticky_bit_chk,
Marek Vasute624caf2015-07-13 02:38:15 +02002580 left_edge, right_edge, use_read_test);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002581
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002582 /* Search for the right edge of the window for each bit */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002583 ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group,
Marek Vasutb20a5062015-07-13 02:11:02 +02002584 start_dqs, start_dqs_en,
Marek Vasutb69c2472015-07-18 20:34:00 +02002585 &sticky_bit_chk,
Marek Vasutb20a5062015-07-13 02:11:02 +02002586 left_edge, right_edge, use_read_test);
2587 if (ret) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002588 /*
Marek Vasutb20a5062015-07-13 02:11:02 +02002589 * Restore delay chain settings before letting the loop
2590 * in rw_mgr_mem_calibrate_vfifo to retry different
2591 * dqs/ck relationships.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002592 */
Marek Vasutdfed1e62015-07-18 20:42:27 +02002593 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002594 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
Marek Vasutdfed1e62015-07-18 20:42:27 +02002595 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002596
Marek Vasutdfed1e62015-07-18 20:42:27 +02002597 scc_mgr_load_dqs(rw_group);
Marek Vasutb20a5062015-07-13 02:11:02 +02002598 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002599
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002600		debug_cond(DLEVEL >= 1,
Marek Vasutb20a5062015-07-13 02:11:02 +02002601			   "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2602			   __func__, __LINE__, ret - 1, left_edge[ret - 1], right_edge[ret - 1]);
2603 if (use_read_test) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002604 set_failing_group_stage(seq, rw_group *
2605						seq->rwcfg->mem_dq_per_read_dqs + ret - 1,
Marek Vasutb20a5062015-07-13 02:11:02 +02002606 CAL_STAGE_VFIFO,
2607 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002608 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002609 set_failing_group_stage(seq, rw_group *
2610						seq->rwcfg->mem_dq_per_read_dqs + ret - 1,
Marek Vasutb20a5062015-07-13 02:11:02 +02002611 CAL_STAGE_VFIFO_AFTER_WRITES,
2612 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002613 }
Marek Vasutd29f8042015-07-18 20:44:28 +02002614 return -EIO;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002615 }
2616
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002617 min_index = get_window_mid_index(seq, 0, left_edge, right_edge,
2618 &mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002619
2620 /* Determine the amount we can change DQS (which is -mid_min) */
2621 orig_mid_min = mid_min;
2622 new_dqs = start_dqs - mid_min;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002623 if (new_dqs > seq->iocfg->dqs_in_delay_max)
2624 new_dqs = seq->iocfg->dqs_in_delay_max;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002625 else if (new_dqs < 0)
2626 new_dqs = 0;
2627
2628 mid_min = start_dqs - new_dqs;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002629 debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002630 mid_min, new_dqs);
2631
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002632 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
2633 if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max)
Marek Vasutc85b9b32015-08-02 19:47:01 +02002634 mid_min += start_dqs_en - mid_min -
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002635 seq->iocfg->dqs_en_delay_max;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002636 else if (start_dqs_en - mid_min < 0)
2637 mid_min += start_dqs_en - mid_min;
2638 }
2639 new_dqs = start_dqs - mid_min;
2640
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002641 debug_cond(DLEVEL >= 1,
Marek Vasutca8ea372015-07-18 08:01:45 +02002642 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2643 start_dqs,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002644 seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002645 new_dqs, mid_min);
2646
Marek Vasut89feb502015-07-18 19:46:26 +02002647 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002648 center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasut89feb502015-07-18 19:46:26 +02002649 min_index, test_bgn, &dq_margin, &dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002650
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002651 /* Move DQS-en */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002652 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasutf1b8f712015-07-18 19:57:12 +02002653 final_dqs_en = start_dqs_en - mid_min;
Marek Vasutdfed1e62015-07-18 20:42:27 +02002654 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2655 scc_mgr_load_dqs(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002656 }
2657
2658 /* Move DQS */
Marek Vasutdfed1e62015-07-18 20:42:27 +02002659 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2660 scc_mgr_load_dqs(rw_group);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002661 debug_cond(DLEVEL >= 2,
Marek Vasutca8ea372015-07-18 08:01:45 +02002662 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2663 __func__, __LINE__, dq_margin, dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002664
2665 /*
2666 * Do not remove this line as it makes sure all of our decisions
2667 * have been applied. Apply the update bit.
2668 */
Marek Vasutb5450962015-07-12 21:05:08 +02002669 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002670
Marek Vasutd29f8042015-07-18 20:44:28 +02002671 if ((dq_margin < 0) || (dqs_margin < 0))
2672 return -EINVAL;
2673
2674 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002675}
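
/*
 * Worked example for the DQS move above (assumed values): with
 * start_dqs = 20, mid_min = 6 and dqs_in_delay_max = 31, new_dqs
 * becomes 14. With start_dqs = 3 and mid_min = 6, new_dqs clamps to 0
 * and mid_min is recomputed as 3, so center_dq_windows() compensates
 * only for the DQS movement that was actually applied.
 */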
2676
Marek Vasutc27ea622015-07-17 03:16:45 +02002677/**
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002678 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the
2679 * device
Marek Vasut6ca5b962015-07-18 02:46:56 +02002680 * @rw_group: Read/Write Group
2681 * @phase: DQ/DQS phase
2682 *
2683 * Because initially no communication can be reliably performed with the memory
2684 * device, the sequencer uses a guaranteed write mechanism to write data into
2685 * the memory device.
2686 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002687static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq,
2688 const u32 rw_group,
Marek Vasut6ca5b962015-07-18 02:46:56 +02002689 const u32 phase)
2690{
Marek Vasut6ca5b962015-07-18 02:46:56 +02002691 int ret;
2692
2693 /* Set a particular DQ/DQS phase. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002694 scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002695
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002696 debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
Marek Vasut6ca5b962015-07-18 02:46:56 +02002697 __func__, __LINE__, rw_group, phase);
2698
2699 /*
2700 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2701 * Load up the patterns used by read calibration using the
2702 * current DQDQS phase.
2703 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002704 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002705
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002706 if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
Marek Vasut6ca5b962015-07-18 02:46:56 +02002707 return 0;
2708
2709 /*
2710 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2711 * Back-to-Back reads of the patterns used for calibration.
2712 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002713 ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1);
Marek Vasut55c4d692015-07-18 03:55:07 +02002714 if (ret)
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002715 debug_cond(DLEVEL >= 1,
Marek Vasut6ca5b962015-07-18 02:46:56 +02002716 "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2717 __func__, __LINE__, rw_group, phase);
Marek Vasut55c4d692015-07-18 03:55:07 +02002718 return ret;
Marek Vasut6ca5b962015-07-18 02:46:56 +02002719}
2720
2721/**
Marek Vasutfeb5e652015-07-18 02:57:32 +02002722 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2723 * @rw_group: Read/Write Group
2724 * @test_bgn: Rank at which the test begins
2725 *
2726 * DQS enable calibration ensures reliable capture of the DQ signal without
2727 * glitches on the DQS line.
2728 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002729static int
2730rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq,
2731 const u32 rw_group,
2732 const u32 test_bgn)
Marek Vasutfeb5e652015-07-18 02:57:32 +02002733{
Marek Vasutfeb5e652015-07-18 02:57:32 +02002734 /*
2735 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2736	 * DQS and DQS Enable Signal Relationships.
2737 */
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002738
2739	/* We start at zero, so we have one less dq to divide among. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002740 const u32 delay_step = seq->iocfg->io_in_delay_max /
2741 (seq->rwcfg->mem_dq_per_read_dqs - 1);
Marek Vasut28dbf122015-07-20 09:20:42 +02002742 int ret;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002743 u32 i, p, d, r;
2744
2745 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2746
2747 /* Try different dq_in_delays since the DQ path is shorter than DQS. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002748 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002749 r += NUM_RANKS_PER_SHADOW_REG) {
2750 for (i = 0, p = test_bgn, d = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002751 i < seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002752 i++, p++, d += delay_step) {
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002753 debug_cond(DLEVEL >= 1,
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002754 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2755 __func__, __LINE__, rw_group, r, i, p, d);
2756
2757 scc_mgr_set_dq_in_delay(p, d);
2758 scc_mgr_load_dq(p);
2759 }
2760
2761 writel(0, &sdr_scc_mgr->update);
2762 }
2763
2764 /*
2765 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2766 * dq_in_delay values
2767 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002768 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002769
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002770 debug_cond(DLEVEL >= 1,
Vagrant Cascadiana321d042021-12-21 13:07:01 -08002771 "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
Marek Vasut28dbf122015-07-20 09:20:42 +02002772 __func__, __LINE__, rw_group, !ret);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002773
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002774 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002775 r += NUM_RANKS_PER_SHADOW_REG) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002776 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasut3aa19dc2015-07-18 04:28:42 +02002777 writel(0, &sdr_scc_mgr->update);
2778 }
2779
Marek Vasut28dbf122015-07-20 09:20:42 +02002780 return ret;
Marek Vasutfeb5e652015-07-18 02:57:32 +02002781}
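
/*
 * Worked example for the delay spread above (assumed configuration):
 * with io_in_delay_max = 31 and 8 DQ pins per DQS, delay_step is
 * 31 / 7 = 4, so the pins get input delays 0, 4, 8, ..., 28 before
 * the DQS enable phase sweep runs.
 */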
2782
2783/**
Marek Vasut349ea3e2015-07-18 03:10:31 +02002784 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2785 * @rw_group: Read/Write Group
2786 * @test_bgn: Rank at which the test begins
2787 * @use_read_test: Perform a read test
2788 * @update_fom: Update FOM
2789 *
2790 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2791 * within a group.
2792 */
2793static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002794rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq,
2795 const u32 rw_group, const u32 test_bgn,
Marek Vasut349ea3e2015-07-18 03:10:31 +02002796 const int use_read_test,
2797 const int update_fom)
2798
2799{
2800 int ret, grp_calibrated;
2801 u32 rank_bgn, sr;
2802
2803 /*
2804 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2805 * Read per-bit deskew can be done on a per shadow register basis.
2806 */
2807 grp_calibrated = 1;
2808 for (rank_bgn = 0, sr = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002809 rank_bgn < seq->rwcfg->mem_number_of_ranks;
Marek Vasut349ea3e2015-07-18 03:10:31 +02002810 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002811 ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group,
Marek Vasutdfed1e62015-07-18 20:42:27 +02002812 test_bgn,
Marek Vasut349ea3e2015-07-18 03:10:31 +02002813 use_read_test,
2814 update_fom);
Marek Vasutd29f8042015-07-18 20:44:28 +02002815 if (!ret)
Marek Vasut349ea3e2015-07-18 03:10:31 +02002816 continue;
2817
2818 grp_calibrated = 0;
2819 }
2820
2821 if (!grp_calibrated)
2822 return -EIO;
2823
2824 return 0;
2825}
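
/*
 * Worked example (assumed configuration): with mem_number_of_ranks = 4
 * and NUM_RANKS_PER_SHADOW_REG = 2, the loop above runs the per-bit
 * deskew for shadow registers sr = 0 (ranks 0-1) and sr = 1
 * (ranks 2-3); one failing shadow register fails the whole group.
 */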
2826
2827/**
Marek Vasutc27ea622015-07-17 03:16:45 +02002828 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2829 * @rw_group: Read/Write Group
2830 * @test_bgn: Rank at which the test begins
2831 *
2832 * Stage 1: Calibrate the read valid prediction FIFO.
2833 *
2834 * This function implements UniPHY calibration Stage 1, as explained in
2835 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002836 *
Marek Vasutc27ea622015-07-17 03:16:45 +02002837 * - read valid prediction will consist of finding:
2838 * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2839 * - DQS input phase and DQS input delay (DQ/DQS Centering)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002840 * - we also do a per-bit deskew on the DQ lines.
2841 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002842static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq,
2843 const u32 rw_group, const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002844{
Marek Vasut8af9ca02015-08-02 19:42:26 +02002845 u32 p, d;
2846 u32 dtaps_per_ptap;
2847 u32 failed_substage;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002848
Marek Vasut6ca5b962015-07-18 02:46:56 +02002849 int ret;
2850
Marek Vasute42fcea2015-07-17 04:24:18 +02002851 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002852
Marek Vasut912d43e2015-07-18 03:15:34 +02002853 /* Update info for sims */
2854 reg_file_set_group(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002855 reg_file_set_stage(CAL_STAGE_VFIFO);
Marek Vasut912d43e2015-07-18 03:15:34 +02002856 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002857
Marek Vasut912d43e2015-07-18 03:15:34 +02002858 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2859
2860	/* Determine the number of delay taps for each phase tap. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002861 dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
2862 seq->iocfg->delay_per_dqs_en_dchain_tap)
2863 - 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002864
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002865 for (d = 0; d <= dtaps_per_ptap; d += 2) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002866 /*
2867		 * In RLDRAMX we may be messing with the delay of pins in
Marek Vasute42fcea2015-07-17 04:24:18 +02002868		 * the same write rw_group but outside of the current read
2869		 * rw_group; that's OK because we haven't calibrated the
Marek Vasutd7f49152015-07-17 03:44:26 +02002870		 * output side yet.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002871 */
2872 if (d > 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002873 scc_mgr_apply_group_all_out_delay_add_all_ranks(seq,
2874 rw_group,
2875 d);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002876 }
2877
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002878 for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) {
Marek Vasut6ca5b962015-07-18 02:46:56 +02002879 /* 1) Guaranteed Write */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002880 ret = rw_mgr_mem_calibrate_guaranteed_write(seq,
2881 rw_group,
2882 p);
Marek Vasut6ca5b962015-07-18 02:46:56 +02002883 if (ret)
2884 break;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002885
Marek Vasutfeb5e652015-07-18 02:57:32 +02002886 /* 2) DQS Enable Calibration */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002887 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq,
2888 rw_group,
Marek Vasutfeb5e652015-07-18 02:57:32 +02002889 test_bgn);
2890 if (ret) {
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002891 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2892 continue;
2893 }
2894
Marek Vasut349ea3e2015-07-18 03:10:31 +02002895 /* 3) Centering DQ/DQS */
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002896 /*
Marek Vasut349ea3e2015-07-18 03:10:31 +02002897 * If doing read after write calibration, do not update
2898 * FOM now. Do it then.
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002899 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002900 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq,
2901 rw_group,
2902 test_bgn,
2903 1, 0);
Marek Vasut349ea3e2015-07-18 03:10:31 +02002904 if (ret) {
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002905 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
Marek Vasut349ea3e2015-07-18 03:10:31 +02002906 continue;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002907 }
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002908
Marek Vasut349ea3e2015-07-18 03:10:31 +02002909 /* All done. */
2910 goto cal_done_ok;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002911 }
2912 }
2913
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002914 /* Calibration Stage 1 failed. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002915 set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO,
2916 failed_substage);
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002917 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002918
Marek Vasutf2a4bda2015-07-17 03:50:17 +02002919 /* Calibration Stage 1 completed OK. */
2920cal_done_ok:
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002921 /*
2922 * Reset the delay chains back to zero if they have moved > 1
2923	 * (check for > 1 because the loop increases d even when the test
2924	 * passes on the first iteration).
2925 */
2926 if (d > 2)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002927 scc_mgr_zero_group(seq, rw_group, 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002928
2929 return 1;
2930}
2931
Marek Vasut2da02572015-07-18 05:58:44 +02002932/**
2933 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2934 * @rw_group: Read/Write Group
2935 * @test_bgn: Rank at which the test begins
2936 *
2937 * Stage 3: DQ/DQS Centering.
2938 *
2939 * This function implements UniPHY calibration Stage 3, as explained in
2940 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2941 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002942static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq,
2943 const u32 rw_group,
Marek Vasut2da02572015-07-18 05:58:44 +02002944 const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002945{
Marek Vasut2da02572015-07-18 05:58:44 +02002946 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002947
Marek Vasut2da02572015-07-18 05:58:44 +02002948 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002949
Marek Vasut2da02572015-07-18 05:58:44 +02002950 /* Update info for sims. */
2951 reg_file_set_group(rw_group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002952 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2953 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2954
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002955 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0,
2956 1);
Marek Vasut2da02572015-07-18 05:58:44 +02002957 if (ret)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002958 set_failing_group_stage(seq, rw_group,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002959 CAL_STAGE_VFIFO_AFTER_WRITES,
2960 CAL_SUBSTAGE_VFIFO_CENTER);
Marek Vasut2da02572015-07-18 05:58:44 +02002961 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002962}
2963
Marek Vasuta3581272015-07-21 06:18:57 +02002964/**
2965 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2966 *
2967 * Stage 4: Minimize latency.
2968 *
2969 * This function implements UniPHY calibration Stage 4, as explained in
2970 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2971 * Calibrate LFIFO to find smallest read latency.
2972 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002973static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002974{
Marek Vasuta3581272015-07-21 06:18:57 +02002975 int found_one = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002976
2977 debug("%s:%d\n", __func__, __LINE__);
2978
Marek Vasuta3581272015-07-21 06:18:57 +02002979 /* Update info for sims. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002980 reg_file_set_stage(CAL_STAGE_LFIFO);
2981 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2982
2983 /* Load up the patterns used by read calibration for all ranks */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002984 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002985
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002986 do {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002987 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02002988 debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002989 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002990
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02002991 if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0,
2992 NUM_READ_TESTS,
Marek Vasuta3581272015-07-21 06:18:57 +02002993 PASS_ALL_BITS, 1))
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002994 break;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05002995
2996 found_one = 1;
Marek Vasuta3581272015-07-21 06:18:57 +02002997 /*
2998 * Reduce read latency and see if things are
2999 * working correctly.
3000 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003001 seq->gbl.curr_read_lat--;
3002 } while (seq->gbl.curr_read_lat > 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003003
Marek Vasuta3581272015-07-21 06:18:57 +02003004 /* Reset the fifos to get pointers to known state. */
Marek Vasutb5450962015-07-12 21:05:08 +02003005 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003006
3007 if (found_one) {
Marek Vasuta3581272015-07-21 06:18:57 +02003008 /* Add a fudge factor to the read latency that was determined */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003009 seq->gbl.curr_read_lat += 2;
3010 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003011 debug_cond(DLEVEL >= 2,
Marek Vasuta3581272015-07-21 06:18:57 +02003012 "%s:%d lfifo: success: using read_lat=%u\n",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003013 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003014 } else {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003015 set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003016 CAL_SUBSTAGE_READ_LATENCY);
3017
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003018 debug_cond(DLEVEL >= 2,
Marek Vasuta3581272015-07-21 06:18:57 +02003019 "%s:%d lfifo: failed at initial read_lat=%u\n",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003020 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003021 }
Marek Vasuta3581272015-07-21 06:18:57 +02003022
3023 return found_one;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003024}
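
/*
 * Minimal sketch of the LFIFO search above (not the sequencer code):
 * a "decrease until it breaks, then back off" loop. pass_fn stands in
 * for the all-ranks read test; the + 2 mirrors the fudge factor added
 * on success.
 */
static u32 example_minimize_read_latency(u32 lat, int (*pass_fn)(u32))
{
	int found_one = 0;

	while (lat > 0 && pass_fn(lat)) {
		found_one = 1;
		lat--;			/* shrink latency while tests pass */
	}

	return found_one ? lat + 2 : 0;	/* back off from failing value */
}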
3025
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003026/**
3027 * search_window() - Search for the/part of the window with DM/DQS shift
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003028 * @search_dm:		If 1, search for the DM shift; if 0, search for the
3029 *			DQS shift
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003030 * @rank_bgn: Rank number
3031 * @write_group: Write Group
3032 * @bgn_curr: Current window begin
3033 * @end_curr: Current window end
3034 * @bgn_best: Current best window begin
3035 * @end_best: Current best window end
3036 * @win_best: Size of the best window
3037 * @new_dqs: New DQS value (only applicable if search_dm = 0).
3038 *
3039 * Search for the/part of the window with DM/DQS shift.
3040 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003041static void search_window(struct socfpga_sdrseq *seq,
3042 const int search_dm, const u32 rank_bgn,
3043 const u32 write_group, int *bgn_curr, int *end_curr,
3044 int *bgn_best, int *end_best, int *win_best,
3045 int new_dqs)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003046{
3047 u32 bit_chk;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003048 const int max = seq->iocfg->io_out1_delay_max - new_dqs;
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003049 int d, di;
3050
3051 /* Search for the/part of the window with DM/DQS shift. */
3052 for (di = max; di >= 0; di -= DELTA_D) {
3053 if (search_dm) {
3054 d = di;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003055 scc_mgr_apply_group_dm_out1_delay(seq, d);
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003056 } else {
3057 /* For DQS, we go from 0...max */
3058 d = max - di;
3059 /*
Marek Vasutc85b9b32015-08-02 19:47:01 +02003060			 * Note: This only shifts DQS, so we may be limiting
3061			 * ourselves to the width of DQ unnecessarily.
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003062 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003063 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
3064 write_group,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003065 d + new_dqs);
3066 }
3067
3068 writel(0, &sdr_scc_mgr->update);
3069
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003070 if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group,
3071 1, PASS_ALL_BITS, &bit_chk,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003072 0)) {
3073 /* Set current end of the window. */
3074 *end_curr = search_dm ? -d : d;
3075
3076 /*
3077 * If a starting edge of our window has not been seen
3078 * this is our current start of the DM window.
3079 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003080 if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003081 *bgn_curr = search_dm ? -d : d;
3082
3083 /*
3084 * If current window is bigger than best seen.
3085 * Set best seen to be current window.
3086 */
3087 if ((*end_curr - *bgn_curr + 1) > *win_best) {
3088 *win_best = *end_curr - *bgn_curr + 1;
3089 *bgn_best = *bgn_curr;
3090 *end_best = *end_curr;
3091 }
3092 } else {
3093 /* We just saw a failing test. Reset temp edge. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003094 *bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3095 *end_curr = seq->iocfg->io_out1_delay_max + 1;
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003096
3097 /* Early exit is only applicable to DQS. */
3098 if (search_dm)
3099 continue;
3100
3101 /*
3102 * Early exit optimization: if the remaining delay
3103 * chain space is less than already seen largest
3104 * window we can exit.
3105 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003106 if (*win_best - 1 > seq->iocfg->io_out1_delay_max
3107 - new_dqs - d)
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003108 break;
3109 }
3110 }
3111}
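
/*
 * Sketch (illustrative only) of the best-window bookkeeping used
 * above. 'unseen' stands for io_out1_delay_max + 1. A pass extends
 * the current window and promotes it once it outgrows the best seen;
 * a fail resets the current window to the sentinel.
 */
static void example_track_window(int passed, int d, int unseen,
				 int *bgn, int *end, int *bgn_best,
				 int *end_best, int *win_best)
{
	if (!passed) {
		*bgn = unseen;
		*end = unseen;
		return;
	}

	*end = d;
	if (*bgn == unseen)
		*bgn = d;		/* first pass starts the window */

	if (*end - *bgn + 1 > *win_best) {
		*win_best = *end - *bgn + 1;
		*bgn_best = *bgn;
		*end_best = *end;
	}
}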
3112
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003113/**
Marek Vasut2595b242015-07-21 05:33:49 +02003114 * rw_mgr_mem_calibrate_writes_center() - Center all windows
3115 * @rank_bgn: Rank number
3116 * @write_group: Write group
3117 * @test_bgn: Rank at which the test begins
3118 *
3119 * Center all windows. Do per-bit-deskew to possibly increase size of
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003120 * certain windows.
3121 */
Marek Vasutaffbc892015-07-21 05:00:42 +02003122static int
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003123rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq,
3124 const u32 rank_bgn, const u32 write_group,
Marek Vasutaffbc892015-07-21 05:00:42 +02003125 const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003126{
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003127 int i;
Marek Vasutaffbc892015-07-21 05:00:42 +02003128 u32 sticky_bit_chk;
3129 u32 min_index;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003130 int left_edge[seq->rwcfg->mem_dq_per_write_dqs];
3131 int right_edge[seq->rwcfg->mem_dq_per_write_dqs];
Marek Vasutaffbc892015-07-21 05:00:42 +02003132 int mid;
3133 int mid_min, orig_mid_min;
3134 int new_dqs, start_dqs;
3135 int dq_margin, dqs_margin, dm_margin;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003136 int bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3137 int end_curr = seq->iocfg->io_out1_delay_max + 1;
3138 int bgn_best = seq->iocfg->io_out1_delay_max + 1;
3139 int end_best = seq->iocfg->io_out1_delay_max + 1;
Marek Vasutaffbc892015-07-21 05:00:42 +02003140 int win_best = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003141
Marek Vasutb20a5062015-07-13 02:11:02 +02003142 int ret;
3143
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003144 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
3145
3146 dm_margin = 0;
3147
Marek Vasut1bb221e2015-07-21 05:29:05 +02003148 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
3149 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003150 (seq->rwcfg->mem_dq_per_write_dqs << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003151
Marek Vasutaffbc892015-07-21 05:00:42 +02003152 /* Per-bit deskew. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003153
3154 /*
Marek Vasutaffbc892015-07-21 05:00:42 +02003155 * Set the left and right edge of each bit to an illegal value.
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003156 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003157 */
3158 sticky_bit_chk = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003159 for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
3160 left_edge[i] = seq->iocfg->io_out1_delay_max + 1;
3161 right_edge[i] = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003162 }
3163
Marek Vasutaffbc892015-07-21 05:00:42 +02003164 /* Search for the left edge of the window for each bit. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003165 search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn,
Marek Vasutb69c2472015-07-18 20:34:00 +02003166 &sticky_bit_chk,
Marek Vasute624caf2015-07-13 02:38:15 +02003167 left_edge, right_edge, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003168
Marek Vasutaffbc892015-07-21 05:00:42 +02003169 /* Search for the right edge of the window for each bit. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003170 ret = search_right_edge(seq, 1, rank_bgn, write_group, 0,
Marek Vasutb20a5062015-07-13 02:11:02 +02003171 start_dqs, 0,
Marek Vasutb69c2472015-07-18 20:34:00 +02003172 &sticky_bit_chk,
Marek Vasutb20a5062015-07-13 02:11:02 +02003173 left_edge, right_edge, 0);
3174 if (ret) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003175 set_failing_group_stage(seq, test_bgn + ret - 1,
3176 CAL_STAGE_WRITES,
Marek Vasutb20a5062015-07-13 02:11:02 +02003177 CAL_SUBSTAGE_WRITES_CENTER);
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003178 return -EINVAL;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003179 }
3180
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003181 min_index = get_window_mid_index(seq, 1, left_edge, right_edge,
3182 &mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003183
Marek Vasutaffbc892015-07-21 05:00:42 +02003184 /* Determine the amount we can change DQS (which is -mid_min). */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003185 orig_mid_min = mid_min;
3186 new_dqs = start_dqs;
3187 mid_min = 0;
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003188 debug_cond(DLEVEL >= 1,
Marek Vasutaffbc892015-07-21 05:00:42 +02003189 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3190 __func__, __LINE__, start_dqs, new_dqs, mid_min);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003191
Marek Vasut89feb502015-07-18 19:46:26 +02003192 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003193 center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasut89feb502015-07-18 19:46:26 +02003194 min_index, 0, &dq_margin, &dqs_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003195
3196 /* Move DQS */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003197 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Marek Vasutb5450962015-07-12 21:05:08 +02003198 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003199
3200 /* Centre DM */
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003201 debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003202
Marek Vasutaffbc892015-07-21 05:00:42 +02003203 /* Search for the/part of the window with DM shift. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003204 search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003205 &bgn_best, &end_best, &win_best, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003206
Marek Vasutaffbc892015-07-21 05:00:42 +02003207 /* Reset DM delay chains to 0. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003208 scc_mgr_apply_group_dm_out1_delay(seq, 0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003209
3210 /*
3211	 * Check to see if the current window nudges up against 0 delay.
3212	 * If so, we need to continue the search by shifting DQS; otherwise
Marek Vasutaffbc892015-07-21 05:00:42 +02003213	 * the DQS search begins as a new search.
3214 */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003215 if (end_curr != 0) {
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003216 bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3217 end_curr = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003218 }
3219
Marek Vasutaffbc892015-07-21 05:00:42 +02003220 /* Search for the/part of the window with DQS shifts. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003221 search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasut4e79b0a2015-07-21 05:26:58 +02003222 &bgn_best, &end_best, &win_best, new_dqs);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003223
Marek Vasutaffbc892015-07-21 05:00:42 +02003224 /* Assign left and right edge for cal and reporting. */
3225 left_edge[0] = -1 * bgn_best;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003226 right_edge[0] = end_best;
3227
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003228 debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n",
Marek Vasutaffbc892015-07-21 05:00:42 +02003229 __func__, __LINE__, left_edge[0], right_edge[0]);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003230
Marek Vasutaffbc892015-07-21 05:00:42 +02003231 /* Move DQS (back to orig). */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003232 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003233
3234 /* Move DM */
3235
Marek Vasutaffbc892015-07-21 05:00:42 +02003236 /* Find middle of window for the DM bit. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003237 mid = (left_edge[0] - right_edge[0]) / 2;
3238
Marek Vasutaffbc892015-07-21 05:00:42 +02003239 /* Only move right, since we are not moving DQS/DQ. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003240 if (mid < 0)
3241 mid = 0;
3242
Marek Vasutaffbc892015-07-21 05:00:42 +02003243	/* dm_margin should fail if we never find a window. */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003244 if (win_best == 0)
3245 dm_margin = -1;
3246 else
3247 dm_margin = left_edge[0] - mid;
3248
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003249 scc_mgr_apply_group_dm_out1_delay(seq, mid);
Marek Vasutb5450962015-07-12 21:05:08 +02003250 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003251
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003252 debug_cond(DLEVEL >= 2,
Marek Vasutaffbc892015-07-21 05:00:42 +02003253 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3254 __func__, __LINE__, left_edge[0], right_edge[0],
3255 mid, dm_margin);
3256 /* Export values. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003257 seq->gbl.fom_out += dq_margin + dqs_margin;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003258
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003259 debug_cond(DLEVEL >= 2,
Marek Vasutaffbc892015-07-21 05:00:42 +02003260 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3261 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003262
3263 /*
3264 * Do not remove this line as it makes sure all of our
3265 * decisions have been applied.
3266 */
Marek Vasutb5450962015-07-12 21:05:08 +02003267 writel(0, &sdr_scc_mgr->update);
Marek Vasutaffbc892015-07-21 05:00:42 +02003268
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003269 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3270 return -EINVAL;
3271
3272 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003273}
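
/*
 * Worked example for the DM centering above (assumed numbers): with
 * bgn_best = -3 and end_best = 9, left_edge[0] = 3 and
 * right_edge[0] = 9, so mid = (3 - 9) / 2 = -3 is clamped to 0 and
 * dm_margin = left_edge[0] - mid = 3.
 */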
3274
Marek Vasut4a78cc72015-07-18 07:23:25 +02003275/**
3276 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3277 * @rank_bgn: Rank number
3278 * @group: Read/Write Group
3279 * @test_bgn: Rank at which the test begins
3280 *
3281 * Stage 2: Write Calibration Part One.
3282 *
3283 * This function implements UniPHY calibration Stage 2, as explained in
3284 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
3285 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003286static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq,
3287 const u32 rank_bgn, const u32 group,
Marek Vasut4a78cc72015-07-18 07:23:25 +02003288 const u32 test_bgn)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003289{
Marek Vasut4a78cc72015-07-18 07:23:25 +02003290 int ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003291
Marek Vasut4a78cc72015-07-18 07:23:25 +02003292 /* Update info for sims */
3293 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3294
3295 reg_file_set_group(group);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003296 reg_file_set_stage(CAL_STAGE_WRITES);
3297 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3298
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003299 ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group,
3300 test_bgn);
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003301 if (ret)
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003302 set_failing_group_stage(seq, group, CAL_STAGE_WRITES,
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003303 CAL_SUBSTAGE_WRITES_CENTER);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003304
Marek Vasutfc2ec8f2015-07-21 05:32:49 +02003305 return ret;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003306}
3307
Marek Vasutbe333bc2015-07-20 07:33:33 +02003308/**
3309 * mem_precharge_and_activate() - Precharge all banks and activate
3310 *
3311 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3312 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003313static void mem_precharge_and_activate(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003314{
Marek Vasutbe333bc2015-07-20 07:33:33 +02003315 int r;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003316
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003317 for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
Marek Vasutbe333bc2015-07-20 07:33:33 +02003318 /* Set rank. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003319 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003320
Marek Vasutbe333bc2015-07-20 07:33:33 +02003321 /* Precharge all banks. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003322 writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
Marek Vasutb5450962015-07-12 21:05:08 +02003323 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003324
Marek Vasutb5450962015-07-12 21:05:08 +02003325 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003326 writel(seq->rwcfg->activate_0_and_1_wait1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02003327 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003328
Marek Vasutb5450962015-07-12 21:05:08 +02003329 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003330 writel(seq->rwcfg->activate_0_and_1_wait2,
Marek Vasutc85b9b32015-08-02 19:47:01 +02003331 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003332
Marek Vasutbe333bc2015-07-20 07:33:33 +02003333 /* Activate rows. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003334 writel(seq->rwcfg->activate_0_and_1,
3335 SDR_PHYGRP_RWMGRGRP_ADDRESS |
3336 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003337 }
3338}
3339
Marek Vasut0f0840d2015-07-17 01:57:41 +02003340/**
3341 * mem_init_latency() - Configure memory RLAT and WLAT settings
3342 *
3343 * Configure memory RLAT and WLAT parameters.
3344 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003345static void mem_init_latency(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003346{
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003347 /*
Marek Vasut0f0840d2015-07-17 01:57:41 +02003348 * For AV/CV, LFIFO is hardened and always runs at full rate
3349 * so max latency in AFI clocks, used here, is correspondingly
3350 * smaller.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003351 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003352 const u32 max_latency = (1 << seq->misccfg->max_latency_count_width)
3353 - 1;
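	/*
	 * Worked example with a hypothetical 5-bit counter
	 * (max_latency_count_width == 5): max_latency is
	 * (1 << 5) - 1 == 31 AFI clocks.
	 */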
Marek Vasut0f0840d2015-07-17 01:57:41 +02003354 u32 rlat, wlat;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003355
Marek Vasut0f0840d2015-07-17 01:57:41 +02003356 debug("%s:%d\n", __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003357
3358 /*
Marek Vasut0f0840d2015-07-17 01:57:41 +02003359 * Read in write latency.
3360 * WL for Hard PHY does not include additive latency.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003361 */
Marek Vasut0f0840d2015-07-17 01:57:41 +02003362 wlat = readl(&data_mgr->t_wl_add);
3363 wlat += readl(&data_mgr->mem_t_add);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003364
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003365 seq->gbl.rw_wl_nop_cycles = wlat - 1;
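	/* E.g. a hypothetical total wlat of 7 AFI clocks yields rw_wl_nop_cycles == 6. */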
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003366
Marek Vasut0f0840d2015-07-17 01:57:41 +02003367 /* Read in read latency. */
3368 rlat = readl(&data_mgr->t_rl_add);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003369
Marek Vasut0f0840d2015-07-17 01:57:41 +02003370 /* Set a pretty high read latency initially. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003371 seq->gbl.curr_read_lat = rlat + 16;
3372 if (seq->gbl.curr_read_lat > max_latency)
3373 seq->gbl.curr_read_lat = max_latency;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003374
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003375 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003376
Marek Vasut0f0840d2015-07-17 01:57:41 +02003377 /* Advertise write latency. */
3378 writel(wlat, &phy_mgr_cfg->afi_wlat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003379}
3380
Marek Vasut60daef82015-07-26 10:54:15 +02003381/**
3382 * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3383 *
3384 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3385 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003386static void mem_skip_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003387{
Marek Vasut8af9ca02015-08-02 19:42:26 +02003388 u32 vfifo_offset;
3389 u32 i, j, r;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003390
3391 debug("%s:%d\n", __func__, __LINE__);
3392 /* Need to update every shadow register set used by the interface */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003393 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut60daef82015-07-26 10:54:15 +02003394 r += NUM_RANKS_PER_SHADOW_REG) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003395 /*
3396 * Set output phase alignment settings appropriate for
3397 * skip calibration.
3398 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003399 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003400 scc_mgr_set_dqs_en_phase(i, 0);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003401 if (seq->iocfg->dll_chain_length == 6)
Marek Vasut7e8f8a72015-08-02 19:10:58 +02003402 scc_mgr_set_dqdqs_output_phase(i, 6);
3403 else
3404 scc_mgr_set_dqdqs_output_phase(i, 7);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003405 /*
3406 * Case:33398
3407 *
3408 * Write data arrives at the I/O two cycles before write
3409 * latency is reached (720 deg).
3410 * -> due to bit-slip in a/c bus
3411 * -> to allow board skew where dqs is longer than ck
3412 * -> how often can this happen!?
3413 * -> can claim back some ptaps for high freq
3414 * support if we can relax this, but I digress...
3415 *
3416 * The write_clk leads mem_ck by 90 deg
3417 * The minimum ptap of the OPA is 180 deg
3418 * Each ptap has (360 / seq->iocfg->dll_chain_length) deg of delay
3419 * The write_clk is always delayed by 2 ptaps
3420 *
3421 * Hence, to make DQS aligned to CK, we need to delay
3422 * DQS by:
Marek Vasutc85b9b32015-08-02 19:47:01 +02003423 * (720 - 90 - 180 - 2 *
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003424 * (360 / seq->iocfg->dll_chain_length))
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003425 *
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003426 * Dividing the above by
3427 * (360 / seq->iocfg->dll_chain_length)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003428 * gives us the number of ptaps, which simplifies to:
3429 *
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003430 * (1.25 * seq->iocfg->dll_chain_length - 2)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003431 */
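			/*
			 * Worked example with an assumed chain length of 8
			 * (45 deg per ptap): (125 * 8) / 100 - 2 == 8 ptaps,
			 * i.e. 360 deg, matching the required DQS delay of
			 * 720 - 90 - 180 - 90 (two ptaps) == 360 deg.
			 */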
Marek Vasut60daef82015-07-26 10:54:15 +02003432 scc_mgr_set_dqdqs_output_phase(i,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003433 ((125 * seq->iocfg->dll_chain_length)
3434 / 100) - 2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003435 }
Marek Vasutb5450962015-07-12 21:05:08 +02003436 writel(0xff, &sdr_scc_mgr->dqs_ena);
3437 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003438
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003439 for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
Marek Vasutb5450962015-07-12 21:05:08 +02003440 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3441 SCC_MGR_GROUP_COUNTER_OFFSET);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003442 }
Marek Vasutb5450962015-07-12 21:05:08 +02003443 writel(0xff, &sdr_scc_mgr->dq_ena);
3444 writel(0xff, &sdr_scc_mgr->dm_ena);
3445 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003446 }
3447
3448 /* Compensate for simulation model behaviour */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003449 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003450 scc_mgr_set_dqs_bus_in_delay(i, 10);
3451 scc_mgr_load_dqs(i);
3452 }
Marek Vasutb5450962015-07-12 21:05:08 +02003453 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003454
3455 /*
3456 * Arria V has hard FIFOs that can only be initialized by incrementing
3457 * in the sequencer.
3458 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003459 vfifo_offset = seq->misccfg->calib_vfifo_offset;
Marek Vasut60daef82015-07-26 10:54:15 +02003460 for (j = 0; j < vfifo_offset; j++)
Marek Vasutb5450962015-07-12 21:05:08 +02003461 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
Marek Vasutb5450962015-07-12 21:05:08 +02003462 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003463
3464 /*
Marek Vasut60daef82015-07-26 10:54:15 +02003465 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
3466 * setting from a generation-time constant.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003467 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003468 seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset;
3469 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003470}
3471
Marek Vasutd9fcf9a2015-07-20 04:34:51 +02003472/**
3473 * mem_calibrate() - Memory calibration entry point.
3474 *
3475 * Perform memory calibration.
3476 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003477static u32 mem_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003478{
Marek Vasut8af9ca02015-08-02 19:42:26 +02003479 u32 i;
3480 u32 rank_bgn, sr;
3481 u32 write_group, write_test_bgn;
3482 u32 read_group, read_test_bgn;
3483 u32 run_groups, current_run;
3484 u32 failing_groups = 0;
3485 u32 group_failed = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003486
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003487 const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width /
3488 seq->rwcfg->mem_if_write_dqs_width;
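	/*
	 * E.g. with hypothetical widths of 8 read DQS groups and 4 write
	 * DQS groups, rwdqs_ratio == 2 and each write group below spans
	 * two consecutive read groups.
	 */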
Marek Vasutd6f28792015-07-17 02:21:47 +02003489
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003490 debug("%s:%d\n", __func__, __LINE__);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003491
Marek Vasut0f0840d2015-07-17 01:57:41 +02003492 /* Initialize the data settings */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003493 seq->gbl.error_substage = CAL_SUBSTAGE_NIL;
3494 seq->gbl.error_stage = CAL_STAGE_NIL;
3495 seq->gbl.error_group = 0xff;
3496 seq->gbl.fom_in = 0;
3497 seq->gbl.fom_out = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003498
Marek Vasut0f0840d2015-07-17 01:57:41 +02003499 /* Initialize WLAT and RLAT. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003500 mem_init_latency(seq);
Marek Vasut0f0840d2015-07-17 01:57:41 +02003501
3502 /* Initialize bit slips. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003503 mem_precharge_and_activate(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003504
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003505 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Marek Vasutb5450962015-07-12 21:05:08 +02003506 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3507 SCC_MGR_GROUP_COUNTER_OFFSET);
Marek Vasutd4d3de22015-07-19 01:34:43 +02003508 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3509 if (i == 0)
3510 scc_mgr_set_hhp_extras();
3511
Marek Vasut0341de42015-07-17 02:06:20 +02003512 scc_set_bypass_mode(i);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003513 }
3514
Marek Vasutb984ee82015-07-17 02:07:12 +02003515 /* Calibration is skipped. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003516 if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003517 /*
3518 * Set VFIFO and LFIFO to instant-on settings in skip
3519 * calibration mode.
3520 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003521 mem_skip_calibrate(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003522
Marek Vasutb984ee82015-07-17 02:07:12 +02003523 /*
3524 * Do not remove this line as it makes sure all of our
3525 * decisions have been applied.
3526 */
3527 writel(0, &sdr_scc_mgr->update);
3528 return 1;
3529 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003530
Marek Vasutb984ee82015-07-17 02:07:12 +02003531 /* Calibration is not skipped. */
3532 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3533 /*
3534 * Zero all delay chain/phase settings for all
3535 * groups and all shadow register sets.
3536 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003537 scc_mgr_zero_all(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003538
Marek Vasuteb98b382015-08-02 18:27:21 +02003539 run_groups = ~0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003540
Marek Vasutb984ee82015-07-17 02:07:12 +02003541 for (write_group = 0, write_test_bgn = 0; write_group
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003542 < seq->rwcfg->mem_if_write_dqs_width; write_group++,
3543 write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) {
Marek Vasut0568f222015-07-17 02:50:56 +02003544 /* Initialize the group failure */
Marek Vasutb984ee82015-07-17 02:07:12 +02003545 group_failed = 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003546
Marek Vasutb984ee82015-07-17 02:07:12 +02003547 current_run = run_groups & ((1 <<
3548 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3549 run_groups = run_groups >>
3550 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003551
Marek Vasutb984ee82015-07-17 02:07:12 +02003552 if (current_run == 0)
3553 continue;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003554
Marek Vasutb984ee82015-07-17 02:07:12 +02003555 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3556 SCC_MGR_GROUP_COUNTER_OFFSET);
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003557 scc_mgr_zero_group(seq, write_group, 0);
Marek Vasutb984ee82015-07-17 02:07:12 +02003558
Marek Vasutd6f28792015-07-17 02:21:47 +02003559 for (read_group = write_group * rwdqs_ratio,
3560 read_test_bgn = 0;
Marek Vasut0568f222015-07-17 02:50:56 +02003561 read_group < (write_group + 1) * rwdqs_ratio;
Marek Vasutd6f28792015-07-17 02:21:47 +02003562 read_group++,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003563 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
Marek Vasutd6f28792015-07-17 02:21:47 +02003564 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3565 continue;
3566
Marek Vasutb984ee82015-07-17 02:07:12 +02003567 /* Calibrate the VFIFO */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003568 if (rw_mgr_mem_calibrate_vfifo(seq, read_group,
Marek Vasutd6f28792015-07-17 02:21:47 +02003569 read_test_bgn))
3570 continue;
Marek Vasutb984ee82015-07-17 02:07:12 +02003571
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003572 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasutc85b9b32015-08-02 19:47:01 +02003573 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasutd6f28792015-07-17 02:21:47 +02003574 return 0;
Marek Vasut0568f222015-07-17 02:50:56 +02003575
3576 /* The group failed, we're done. */
3577 goto grp_failed;
Marek Vasutb984ee82015-07-17 02:07:12 +02003578 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003579
Marek Vasutb984ee82015-07-17 02:07:12 +02003580 /* Calibrate the output side */
Marek Vasut0568f222015-07-17 02:50:56 +02003581 for (rank_bgn = 0, sr = 0;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003582 rank_bgn < seq->rwcfg->mem_number_of_ranks;
Marek Vasut0568f222015-07-17 02:50:56 +02003583 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3584 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3585 continue;
Marek Vasutf04045f2015-07-17 02:31:04 +02003586
Marek Vasut0568f222015-07-17 02:50:56 +02003587 /* Not needed in quick mode! */
Marek Vasutc85b9b32015-08-02 19:47:01 +02003588 if (STATIC_CALIB_STEPS &
3589 CALIB_SKIP_DELAY_SWEEPS)
Marek Vasut0568f222015-07-17 02:50:56 +02003590 continue;
Marek Vasutf04045f2015-07-17 02:31:04 +02003591
Marek Vasut0568f222015-07-17 02:50:56 +02003592 /* Calibrate WRITEs */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003593 if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn,
Marek Vasutc85b9b32015-08-02 19:47:01 +02003594 write_group,
3595 write_test_bgn))
Marek Vasut0568f222015-07-17 02:50:56 +02003596 continue;
Marek Vasutf04045f2015-07-17 02:31:04 +02003597
Marek Vasut0568f222015-07-17 02:50:56 +02003598 group_failed = 1;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003599 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasutc85b9b32015-08-02 19:47:01 +02003600 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasut0568f222015-07-17 02:50:56 +02003601 return 0;
Marek Vasutb984ee82015-07-17 02:07:12 +02003602 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003603
Marek Vasut0568f222015-07-17 02:50:56 +02003604 /* Some group failed, we're done. */
3605 if (group_failed)
3606 goto grp_failed;
Marek Vasut6db55732015-07-17 02:38:51 +02003607
Marek Vasut0568f222015-07-17 02:50:56 +02003608 for (read_group = write_group * rwdqs_ratio,
3609 read_test_bgn = 0;
3610 read_group < (write_group + 1) * rwdqs_ratio;
3611 read_group++,
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003612 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
Marek Vasut0568f222015-07-17 02:50:56 +02003613 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3614 continue;
3615
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003616 if (!rw_mgr_mem_calibrate_vfifo_end(seq,
3617 read_group,
Marek Vasutc85b9b32015-08-02 19:47:01 +02003618 read_test_bgn))
Marek Vasut0568f222015-07-17 02:50:56 +02003619 continue;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003620
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003621 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasutc85b9b32015-08-02 19:47:01 +02003622 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasut0568f222015-07-17 02:50:56 +02003623 return 0;
3624
3625 /* The group failed, we're done. */
3626 goto grp_failed;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003627 }
3628
Marek Vasut0568f222015-07-17 02:50:56 +02003629 /* No group failed, continue as usual. */
3630 continue;
3631
3632grp_failed: /* A group failed, increment the counter. */
3633 failing_groups++;
Marek Vasutb984ee82015-07-17 02:07:12 +02003634 }
3635
3636 /*
3637 * If there are any failing groups then report
3638 * the failure.
3639 */
3640 if (failing_groups != 0)
3641 return 0;
3642
Marek Vasutfc38d5c2015-07-17 02:40:21 +02003643 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3644 continue;
3645
Marek Vasutb984ee82015-07-17 02:07:12 +02003646 /* Calibrate the LFIFO */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003647 if (!rw_mgr_mem_calibrate_lfifo(seq))
Marek Vasutfc38d5c2015-07-17 02:40:21 +02003648 return 0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003649 }
3650
3651 /*
3652 * Do not remove this line as it makes sure all of our decisions
3653 * have been applied.
3654 */
Marek Vasutb5450962015-07-12 21:05:08 +02003655 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003656 return 1;
3657}
3658
Marek Vasut092a1ef2015-07-17 01:20:21 +02003659/**
3660 * run_mem_calibrate() - Perform memory calibration
3661 *
3662 * This function triggers the entire memory calibration procedure.
3663 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003664static int run_mem_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003665{
Marek Vasut092a1ef2015-07-17 01:20:21 +02003666 int pass;
Marek Vasut69469892016-04-05 23:41:56 +02003667 u32 ctrl_cfg;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003668
3669 debug("%s:%d\n", __func__, __LINE__);
3670
3671 /* Reset pass/fail status shown on afi_cal_success/fail */
Marek Vasutb5450962015-07-12 21:05:08 +02003672 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003673
Marek Vasut092a1ef2015-07-17 01:20:21 +02003674 /* Stop tracking manager. */
Marek Vasut69469892016-04-05 23:41:56 +02003675 ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
3676 writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
3677 &sdr_ctrl->ctrl_cfg);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003678
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003679 phy_mgr_initialize(seq);
3680 rw_mgr_mem_initialize(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003681
Marek Vasut092a1ef2015-07-17 01:20:21 +02003682 /* Perform the actual memory calibration. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003683 pass = mem_calibrate(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003684
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003685 mem_precharge_and_activate(seq);
Marek Vasutb5450962015-07-12 21:05:08 +02003686 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003687
Marek Vasut092a1ef2015-07-17 01:20:21 +02003688 /* Handoff. */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003689 rw_mgr_mem_handoff(seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003690 /*
Marek Vasut092a1ef2015-07-17 01:20:21 +02003691 * In Hard PHY this is a 2-bit control:
3692 * 0: AFI Mux Select
3693 * 1: DDIO Mux Select
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003694 */
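	/* 0x2 thus sets the DDIO mux select and clears the AFI mux select. */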
Marek Vasut092a1ef2015-07-17 01:20:21 +02003695 writel(0x2, &phy_mgr_cfg->mux_sel);
3696
3697 /* Start tracking manager. */
Marek Vasut69469892016-04-05 23:41:56 +02003698 writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);
Marek Vasut092a1ef2015-07-17 01:20:21 +02003699
3700 return pass;
3701}
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003702
Marek Vasut092a1ef2015-07-17 01:20:21 +02003703/**
3704 * debug_mem_calibrate() - Report result of memory calibration
3705 * @pass: Value indicating whether calibration passed or failed
3706 *
3707 * This function reports the results of the memory calibration
3708 * and writes debug information into the register file.
3709 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003710static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass)
Marek Vasut092a1ef2015-07-17 01:20:21 +02003711{
Marek Vasut8af9ca02015-08-02 19:42:26 +02003712 u32 debug_info;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003713
3714 if (pass) {
Marek Vasuted6c1ab2021-09-14 05:20:19 +02003715 debug(KBUILD_BASENAME ": CALIBRATION PASSED\n");
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003716
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003717 seq->gbl.fom_in /= 2;
3718 seq->gbl.fom_out /= 2;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003719
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003720 if (seq->gbl.fom_in > 0xff)
3721 seq->gbl.fom_in = 0xff;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003722
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003723 if (seq->gbl.fom_out > 0xff)
3724 seq->gbl.fom_out = 0xff;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003725
3726 /* Update the FOM in the register file */
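	/* E.g. fom_in == 0x30 and fom_out == 0x41 pack to 0x4130. */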
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003727 debug_info = seq->gbl.fom_in;
3728 debug_info |= seq->gbl.fom_out << 8;
Marek Vasutb5450962015-07-12 21:05:08 +02003729 writel(debug_info, &sdr_reg_file->fom);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003730
Marek Vasutb5450962015-07-12 21:05:08 +02003731 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3732 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003733 } else {
Marek Vasuted6c1ab2021-09-14 05:20:19 +02003734 debug(KBUILD_BASENAME ": CALIBRATION FAILED\n");
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003735
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003736 debug_info = seq->gbl.error_stage;
3737 debug_info |= seq->gbl.error_substage << 8;
3738 debug_info |= seq->gbl.error_group << 16;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003739
Marek Vasutb5450962015-07-12 21:05:08 +02003740 writel(debug_info, &sdr_reg_file->failing_stage);
3741 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3742 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003743
3744 /* Update the failing group/stage in the register file */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003745 debug_info = seq->gbl.error_stage;
3746 debug_info |= seq->gbl.error_substage << 8;
3747 debug_info |= seq->gbl.error_group << 16;
Marek Vasutb5450962015-07-12 21:05:08 +02003748 writel(debug_info, &sdr_reg_file->failing_stage);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003749 }
3750
Marek Vasuted6c1ab2021-09-14 05:20:19 +02003751 debug(KBUILD_BASENAME ": Calibration complete\n");
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003752}
3753
Marek Vasutea9771b2015-07-19 06:12:42 +02003754/**
3755 * hc_initialize_rom_data() - Initialize ROM data
3756 *
3757 * Initialize ROM data.
3758 */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003759static void hc_initialize_rom_data(void)
3760{
Marek Vasut3384e742015-08-02 17:15:19 +02003761 unsigned int nelem = 0;
3762 const u32 *rom_init;
Marek Vasutea9771b2015-07-19 06:12:42 +02003763 u32 i, addr;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003764
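	/* Each ROM entry is a 32-bit word, hence the (i << 2) byte offsets below. */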
Marek Vasut3384e742015-08-02 17:15:19 +02003765 socfpga_get_seq_inst_init(&rom_init, &nelem);
Marek Vasuta3340102015-07-12 19:03:33 +02003766 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
Marek Vasut3384e742015-08-02 17:15:19 +02003767 for (i = 0; i < nelem; i++)
3768 writel(rom_init[i], addr + (i << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003769
Marek Vasut3384e742015-08-02 17:15:19 +02003770 socfpga_get_seq_ac_init(&rom_init, &nelem);
Marek Vasuta3340102015-07-12 19:03:33 +02003771 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
Marek Vasut3384e742015-08-02 17:15:19 +02003772 for (i = 0; i < nelem; i++)
3773 writel(rom_init[i], addr + (i << 2));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003774}
3775
Marek Vasuta17ae0f2015-07-19 06:13:37 +02003776/**
3777 * initialize_reg_file() - Initialize SDR register file
3778 *
3779 * Initialize SDR register file.
3780 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003781static void initialize_reg_file(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003782{
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003783 /* Initialize the register file with the correct data */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003784 writel(seq->misccfg->reg_file_init_seq_signature,
3785 &sdr_reg_file->signature);
Marek Vasutb5450962015-07-12 21:05:08 +02003786 writel(0, &sdr_reg_file->debug_data_addr);
3787 writel(0, &sdr_reg_file->cur_stage);
3788 writel(0, &sdr_reg_file->fom);
3789 writel(0, &sdr_reg_file->failing_stage);
3790 writel(0, &sdr_reg_file->debug1);
3791 writel(0, &sdr_reg_file->debug2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003792}
3793
Marek Vasut0c9f3cb2015-07-19 06:14:04 +02003794/**
3795 * initialize_hps_phy() - Initialize HPS PHY
3796 *
3797 * Initialize HPS PHY.
3798 */
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003799static void initialize_hps_phy(void)
3800{
Marek Vasut8af9ca02015-08-02 19:42:26 +02003801 u32 reg;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003802 /*
3803 * Tracking also gets configured here because it's in the
3804 * same register.
3805 */
Marek Vasut8af9ca02015-08-02 19:42:26 +02003806 u32 trk_sample_count = 7500;
3807 u32 trk_long_idle_sample_count = (10 << 16) | 100;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003808 /*
3809 * Format is number of outer loops in the 16 MSB, sample
3810 * count in 16 LSB.
3811 */
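	/* I.e. 10 outer loops of 100 samples each for the long-idle case. */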
3812
3813 reg = 0;
3814 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3815 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3816 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3817 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3818 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3819 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3820 /*
3821 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3822 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3823 */
3824 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3825 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3826 trk_sample_count);
Marek Vasutcd5d38e2015-07-12 20:49:39 +02003827 writel(reg, &sdr_ctrl->phy_ctrl0);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003828
3829 reg = 0;
3830 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3831 trk_sample_count >>
3832 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3833 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3834 trk_long_idle_sample_count);
Marek Vasutcd5d38e2015-07-12 20:49:39 +02003835 writel(reg, &sdr_ctrl->phy_ctrl1);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003836
3837 reg = 0;
3838 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3839 trk_long_idle_sample_count >>
3840 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
Marek Vasutcd5d38e2015-07-12 20:49:39 +02003841 writel(reg, &sdr_ctrl->phy_ctrl2);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003842}
3843
Marek Vasutb0563cf2015-07-17 00:45:11 +02003844/**
3845 * initialize_tracking() - Initialize tracking
3846 *
3847 * Initialize the register file with usable initial data.
3848 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003849static void initialize_tracking(struct socfpga_sdrseq *seq)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003850{
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003851 /*
Marek Vasutb0563cf2015-07-17 00:45:11 +02003852 * Initialize the register file with the correct data.
3853 * Compute usable version of value in case we skip full
3854 * computation later.
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003855 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003856 writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
3857 seq->iocfg->delay_per_dchain_tap) - 1,
Marek Vasutb0563cf2015-07-17 00:45:11 +02003858 &sdr_reg_file->dtaps_per_ptap);
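	/*
	 * For illustration, with assumed values of 416 ps per ptap and
	 * 25 ps per dtap, this stores DIV_ROUND_UP(416, 25) - 1 == 16.
	 */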
3859
3860 /* trk_sample_count */
3861 writel(7500, &sdr_reg_file->trk_sample_count);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003862
Marek Vasutb0563cf2015-07-17 00:45:11 +02003863 /* longidle: outer loop count [31:16], sample count [15:0] */
3864 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
3865
3866 /*
3867 * longidle sample count [31:24]
3868 * trfc, worst case of 933MHz 4Gb [23:16]
3869 * trcd, worst case [15:8]
3870 * vfifo wait [7:0]
3871 */
3872 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3873 &sdr_reg_file->delays);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003874
Marek Vasutb0563cf2015-07-17 00:45:11 +02003875 /* mux delay */
Marek Vasut6bccacf2019-10-18 00:22:31 +02003876 if (dram_is_ddr(2)) {
3877 writel(0, &sdr_reg_file->trk_rw_mgr_addr);
3878 } else if (dram_is_ddr(3)) {
3879 writel((seq->rwcfg->idle << 24) |
3880 (seq->rwcfg->activate_1 << 16) |
3881 (seq->rwcfg->sgle_read << 8) |
3882 (seq->rwcfg->precharge_all << 0),
3883 &sdr_reg_file->trk_rw_mgr_addr);
3884 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003885
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003886 writel(seq->rwcfg->mem_if_read_dqs_width,
Marek Vasutb0563cf2015-07-17 00:45:11 +02003887 &sdr_reg_file->trk_read_dqs_width);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003888
Marek Vasutb0563cf2015-07-17 00:45:11 +02003889 /* trefi [7:0] */
Marek Vasut6bccacf2019-10-18 00:22:31 +02003890 if (dram_is_ddr(2)) {
3891 writel(1000 << 0, &sdr_reg_file->trk_rfsh);
3892 } else if (dram_is_ddr(3)) {
3893 writel((seq->rwcfg->refresh_all << 24) | (1000 << 0),
3894 &sdr_reg_file->trk_rfsh);
3895 }
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003896}
3897
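/**
 * sdram_calibration_full() - Run the full SDRAM calibration sequence
 * @sdr: Base address of the SDR controller block
 *
 * Return: 1 if calibration passed, 0 if it failed, and -ENODEV if @sdr
 * does not match the hard-coded SOCFPGA_SDR_ADDRESS.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct socfpga_sdr *sdr = (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS;
 *
 *	if (sdram_calibration_full(sdr) <= 0)
 *		hang();
 */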
Simon Goldschmidt24910c32019-04-16 22:04:39 +02003898int sdram_calibration_full(struct socfpga_sdr *sdr)
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003899{
Marek Vasut8af9ca02015-08-02 19:42:26 +02003900 u32 pass;
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003901 struct socfpga_sdrseq seq;
Marek Vasut5da0f5b2015-07-17 01:05:36 +02003902
Simon Goldschmidt24910c32019-04-16 22:04:39 +02003903 /*
3904 * For size reasons, this file uses hard-coded addresses.
3905 * Check if we are called with the correct address.
3906 */
3907 if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS)
3908 return -ENODEV;
3909
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003910 memset(&seq, 0, sizeof(seq));
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003911
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003912 seq.rwcfg = socfpga_get_sdram_rwmgr_config();
3913 seq.iocfg = socfpga_get_sdram_io_config();
3914 seq.misccfg = socfpga_get_sdram_misc_config();
Marek Vasut39b620e2015-08-02 18:12:08 +02003915
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003916 /* Set the calibration enabled by default */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003917 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003918 /*
3919 * Only sweep all groups (regardless of fail state) by default
3920 * Set enabled read test by default.
3921 */
3922#if DISABLE_GUARANTEED_READ
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003923 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003924#endif
3925 /* Initialize the register file */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003926 initialize_reg_file(&seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003927
3928 /* Initialize any PHY CSR */
3929 initialize_hps_phy();
3930
3931 scc_mgr_initialize();
3932
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003933 initialize_tracking(&seq);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003934
Marek Vasuted6c1ab2021-09-14 05:20:19 +02003935 debug(KBUILD_BASENAME ": Preparing to start memory calibration\n");
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003936
3937 debug("%s:%d\n", __func__, __LINE__);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003938 debug_cond(DLEVEL >= 1,
Marek Vasut6283b4c2015-07-13 01:05:27 +02003939 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003940 seq.rwcfg->mem_number_of_ranks,
3941 seq.rwcfg->mem_number_of_cs_per_dimm,
3942 seq.rwcfg->mem_dq_per_read_dqs,
3943 seq.rwcfg->mem_dq_per_write_dqs,
3944 seq.rwcfg->mem_virtual_groups_per_read_dqs,
3945 seq.rwcfg->mem_virtual_groups_per_write_dqs);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003946 debug_cond(DLEVEL >= 1,
Marek Vasut6283b4c2015-07-13 01:05:27 +02003947 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003948 seq.rwcfg->mem_if_read_dqs_width,
3949 seq.rwcfg->mem_if_write_dqs_width,
3950 seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width,
3951 seq.iocfg->delay_per_opa_tap,
3952 seq.iocfg->delay_per_dchain_tap);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003953 debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003954 seq.iocfg->delay_per_dqs_en_dchain_tap,
3955 seq.iocfg->dll_chain_length);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003956 debug_cond(DLEVEL >= 1,
Marek Vasutc85b9b32015-08-02 19:47:01 +02003957 "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003958 seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max,
3959 seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003960 debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003961 seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max,
3962 seq.iocfg->io_out2_delay_max);
Marek Vasut4df2d7b2016-04-04 21:21:05 +02003963 debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003964 seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003965
3966 hc_initialize_rom_data();
3967
3968 /* update info for sims */
3969 reg_file_set_stage(CAL_STAGE_NIL);
3970 reg_file_set_group(0);
3971
3972 /*
3973 * Load global needed for those actions that require
3974 * some dynamic calibration support.
3975 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003976 seq.dyn_calib_steps = STATIC_CALIB_STEPS;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003977 /*
3978 * Load global to allow dynamic selection of delay loop settings
3979 * based on calibration mode.
3980 */
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003981 if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3982 seq.skip_delay_mask = 0xff;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003983 else
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003984 seq.skip_delay_mask = 0x0;
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003985
Simon Goldschmidt2be4a3e2019-07-11 21:18:12 +02003986 pass = run_mem_calibrate(&seq);
3987 debug_mem_calibrate(&seq, pass);
Dinh Nguyen135cc7f2015-06-02 22:52:49 -05003988 return pass;
3989}