blob: f859596d8953227165f840e4dfd6b26c1c845ab1 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese5ffceb82015-03-26 15:36:56 +01002/*
3 * Copyright (C) Marvell International Ltd. and its affiliates
Stefan Roese5ffceb82015-03-26 15:36:56 +01004 */
5
Stefan Roese5ffceb82015-03-26 15:36:56 +01006#include "ddr3_init.h"
7#include "xor_regs.h"
8
9/* defines */
10#ifdef MV_DEBUG
11#define DB(x) x
12#else
13#define DB(x)
14#endif
15
16static u32 ui_xor_regs_ctrl_backup;
Chris Packham1a07d212018-05-10 13:28:29 +120017static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
18static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];
Stefan Roese5ffceb82015-03-26 15:36:56 +010019
/*
 * mv_sys_xor_init - Open XOR address-decoding windows over the DRAM CS
 *
 * Backs up the XOR unit 0 window configuration (restored later by
 * mv_sys_xor_finish()), opens one address window per enabled chip
 * select so the engine can reach all of DRAM, then brings the engine
 * to a known state via mv_xor_hal_init().
 *
 * num_of_cs  - number of chip selects to map
 * cs_ena     - bitmask of enabled chip selects
 * cs_size    - size of each chip select (all CS assumed equal)
 * base_delta - offset added to each window's base address
 */
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
	u32 reg, ui, cs_count;
	uint64_t base, size_mask;

	/* save window control, base and size-mask registers of unit 0 */
	ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_base_backup[ui] =
			reg_read(XOR_BASE_ADDR_REG(0, ui));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_mask_backup[ui] =
			reg_read(XOR_SIZE_MASK_REG(0, ui));

	/*
	 * NOTE(review): cs_count advances even when the cs_ena bit is not
	 * set, so num_of_cs windows are only opened when the enabled bits
	 * are contiguous from bit 0 - verify this matches the callers.
	 */
	reg = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/* Enable Window x for each CS */
			reg |= (0x1 << (ui));
			/* set the window's access-control bits (2 bits/win) */
			reg |= (0x3 << ((ui * 2) + 16));
		}
	}

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);

	cs_count = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/*
			 * window x - Base - 0x00000000,
			 * Attribute 0x0e - DRAM
			 */
			base = cs_size * ui + base_delta;
			/* fixed size 2GB for each CS */
			size_mask = 0x7FFF0000;
			/* low bits of base select the target attribute/CS */
			switch (ui) {
			case 0:
				base |= 0xe00;
				break;
			case 1:
				base |= 0xd00;
				break;
			case 2:
				base |= 0xb00;
				break;
			case 3:
				base |= 0x700;
				break;
			case 4: /* SRAM */
				base = 0x40000000;
				/* configure as shared transaction */
				base |= 0x1F00;
				size_mask = 0xF0000;
				break;
			}

			reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
			/*
			 * NOTE(review): the size mask is unconditionally
			 * recomputed from cs_size here, making the
			 * 0x7FFF0000 and SRAM 0xF0000 assignments above
			 * dead stores - even the SRAM window (case 4) gets
			 * a cs_size-based mask. Verify this is intended.
			 */
			size_mask = (cs_size / _64K) - 1;
			size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) & XESMRX_SIZE_MASK_MASK;
			/* window x - Size */
			reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
		}
	}

	/* reset channel 0 of unit 0 to a default configuration */
	mv_xor_hal_init(1);

	return;
}
92
93void mv_sys_xor_finish(void)
94{
95 u32 ui;
96
97 reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
Chris Packham1a07d212018-05-10 13:28:29 +120098 for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
Stefan Roese5ffceb82015-03-26 15:36:56 +010099 reg_write(XOR_BASE_ADDR_REG(0, ui),
100 ui_xor_regs_base_backup[ui]);
Chris Packham1a07d212018-05-10 13:28:29 +1200101 for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
Stefan Roese5ffceb82015-03-26 15:36:56 +0100102 reg_write(XOR_SIZE_MASK_REG(0, ui),
103 ui_xor_regs_mask_backup[ui]);
104
105 reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
106}
107
108/*
109 * mv_xor_hal_init - Initialize XOR engine
110 *
111 * DESCRIPTION:
112 * This function initialize XOR unit.
113 * INPUT:
114 * None.
115 *
116 * OUTPUT:
117 * None.
118 *
119 * RETURN:
120 * MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
121 */
122void mv_xor_hal_init(u32 xor_chan_num)
123{
124 u32 i;
125
126 /* Abort any XOR activity & set default configuration */
127 for (i = 0; i < xor_chan_num; i++) {
128 mv_xor_command_set(i, MV_STOP);
129 mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
130 (4 << XEXCR_DST_BURST_LIMIT_OFFS) |
131 (4 << XEXCR_SRC_BURST_LIMIT_OFFS));
132 }
133}
134
135/*
136 * mv_xor_ctrl_set - Set XOR channel control registers
137 *
138 * DESCRIPTION:
139 *
140 * INPUT:
141 *
142 * OUTPUT:
143 * None.
144 *
145 * RETURN:
146 * MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
147 * NOTE:
148 * This function does not modify the Operation_mode field of control register.
149 */
150int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
151{
152 u32 old_value;
153
154 /* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
155 old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
156 XEXCR_OPERATION_MODE_MASK;
157 xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
158 xor_ctrl |= old_value;
159 reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);
160
161 return MV_OK;
162}
163
Chris Packham1a07d212018-05-10 13:28:29 +1200164int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
Stefan Roese5ffceb82015-03-26 15:36:56 +0100165 u32 init_val_high, u32 init_val_low)
166{
167 u32 temp;
168
Chris Packham1a07d212018-05-10 13:28:29 +1200169 if (block_size == _4G)
170 block_size -= 1;
171
Stefan Roese5ffceb82015-03-26 15:36:56 +0100172 /* Parameter checking */
173 if (chan >= MV_XOR_MAX_CHAN)
174 return MV_BAD_PARAM;
175
176 if (MV_ACTIVE == mv_xor_state_get(chan))
177 return MV_BUSY;
178
179 if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
180 (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
181 return MV_BAD_PARAM;
182
183 /* set the operation mode to Memory Init */
184 temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
185 temp &= ~XEXCR_OPERATION_MODE_MASK;
186 temp |= XEXCR_OPERATION_MODE_MEM_INIT;
187 reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
188
189 /*
190 * update the start_ptr field in XOR Engine [0..1] Destination Pointer
191 * Register
192 */
193 reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);
194
195 /*
196 * update the Block_size field in the XOR Engine[0..1] Block Size
197 * Registers
198 */
199 reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
200 block_size);
201
202 /*
203 * update the field Init_val_l in the XOR Engine Initial Value Register
204 * Low (XEIVRL)
205 */
206 reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);
207
208 /*
209 * update the field Init_val_h in the XOR Engine Initial Value Register
210 * High (XEIVRH)
211 */
212 reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);
213
214 /* start transfer */
215 reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
216 XEXACTR_XESTART_MASK);
217
218 return MV_OK;
219}
220
221/*
222 * mv_xor_state_get - Get XOR channel state.
223 *
224 * DESCRIPTION:
225 * XOR channel activity state can be active, idle, paused.
226 * This function retrunes the channel activity state.
227 *
228 * INPUT:
229 * chan - the channel number
230 *
231 * OUTPUT:
232 * None.
233 *
234 * RETURN:
235 * XOR_CHANNEL_IDLE - If the engine is idle.
236 * XOR_CHANNEL_ACTIVE - If the engine is busy.
237 * XOR_CHANNEL_PAUSED - If the engine is paused.
238 * MV_UNDEFINED_STATE - If the engine state is undefind or there is no
239 * such engine
240 */
241enum mv_state mv_xor_state_get(u32 chan)
242{
243 u32 state;
244
245 /* Parameter checking */
246 if (chan >= MV_XOR_MAX_CHAN) {
247 DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
248 return MV_UNDEFINED_STATE;
249 }
250
251 /* read the current state */
252 state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
253 state &= XEXACTR_XESTATUS_MASK;
254
255 /* return the state */
256 switch (state) {
257 case XEXACTR_XESTATUS_IDLE:
258 return MV_IDLE;
259 case XEXACTR_XESTATUS_ACTIVE:
260 return MV_ACTIVE;
261 case XEXACTR_XESTATUS_PAUSED:
262 return MV_PAUSED;
263 }
264
265 return MV_UNDEFINED_STATE;
266}
267
/*
 * mv_xor_command_set - Set command of XOR channel
 *
 * DESCRIPTION:
 *	XOR channel can be started, idle, paused and restarted.
 *	Paused can be set only if channel is active.
 *	Start can be set only if channel is idle or paused.
 *	Restart can be set only if channel is paused.
 *	Stop can be set only if channel is active.
 *
 * INPUT:
 *	chan    - The channel number
 *	command - The command type (start, stop, restart, pause)
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_OK on success , MV_BAD_PARAM on erroneous parameter, MV_ERROR on
 *	undefind XOR engine mode
 */
int mv_xor_command_set(u32 chan, enum mv_command command)
{
	enum mv_state state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}

	/* get the current state */
	state = mv_xor_state_get(chan);

	if ((command == MV_START) && (state == MV_IDLE)) {
		/* command is start and current state is idle */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
		/* command is stop and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTOP_MASK);
		return MV_OK;
	} else if (((enum mv_state)command == MV_PAUSED) &&
		   (state == MV_ACTIVE)) {
		/*
		 * command is pause and current state is active.
		 * NOTE(review): this casts a command to a state and
		 * compares against MV_PAUSED - it only works if the pause
		 * command shares MV_PAUSED's numeric value; verify the
		 * enum definitions.
		 */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XEPAUSE_MASK);
		return MV_OK;
	} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
		/* command is restart and current state is paused */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XERESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_IDLE)) {
		/* stopping an idle channel is a harmless no-op */
		return MV_OK;
	}

	/* illegal command */
	DB(printf("%s: ERR. Illegal command\n", __func__));

	return MV_BAD_PARAM;
}
337
338void ddr3_new_tip_ecc_scrub(void)
339{
340 u32 cs_c, max_cs;
341 u32 cs_ena = 0;
Chris Packham1a07d212018-05-10 13:28:29 +1200342 u32 dev_num = 0;
343 uint64_t total_mem_size, cs_mem_size = 0;
Stefan Roese5ffceb82015-03-26 15:36:56 +0100344
Chris Packham1a07d212018-05-10 13:28:29 +1200345 printf("DDR Training Sequence - Start scrubbing\n");
346 max_cs = ddr3_tip_max_cs_get(dev_num);
Stefan Roese5ffceb82015-03-26 15:36:56 +0100347 for (cs_c = 0; cs_c < max_cs; cs_c++)
348 cs_ena |= 1 << cs_c;
349
Chris Packham915f8ee2018-05-10 13:28:31 +1200350 /* assume that all CS have same size */
351 ddr3_calc_mem_cs_size(0, &cs_mem_size);
352
Chris Packham1a07d212018-05-10 13:28:29 +1200353 mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
354 total_mem_size = max_cs * cs_mem_size;
355 mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
Stefan Roese5ffceb82015-03-26 15:36:56 +0100356 /* wait for previous transfer completion */
357 while (mv_xor_state_get(0) != MV_IDLE)
358 ;
Stefan Roese5ffceb82015-03-26 15:36:56 +0100359 /* Return XOR State */
360 mv_sys_xor_finish();
361
362 printf("DDR3 Training Sequence - End scrubbing\n");
363}
Chris Packham1a07d212018-05-10 13:28:29 +1200364
365/*
366* mv_xor_transfer - Transfer data from source to destination in one of
367* three modes: XOR, CRC32 or DMA
368*
369* DESCRIPTION:
370* This function initiates XOR channel, according to function parameters,
371* in order to perform XOR, CRC32 or DMA transaction.
372* To gain maximum performance the user is asked to keep the following
373* restrictions:
374* 1) Selected engine is available (not busy).
375* 2) This module does not take into consideration CPU MMU issues.
376* In order for the XOR engine to access the appropriate source
377* and destination, address parameters must be given in system
378* physical mode.
379* 3) This API does not take care of cache coherency issues. The source,
380* destination and, in case of chain, the descriptor list are assumed
381* to be cache coherent.
382* 4) Parameters validity.
383*
384* INPUT:
385* chan - XOR channel number.
386* type - One of three: XOR, CRC32 and DMA operations.
387* xor_chain_ptr - address of chain pointer
388*
389* OUTPUT:
390* None.
391*
392* RETURN:
393* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
394*
395*******************************************************************************/
396int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
397{
398 u32 temp;
399
400 /* Parameter checking */
401 if (chan >= MV_XOR_MAX_CHAN) {
402 DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
403 return MV_BAD_PARAM;
404 }
405 if (mv_xor_state_get(chan) == MV_ACTIVE) {
406 DB(printf("%s: ERR. Channel is already active\n", __func__));
407 return MV_BUSY;
408 }
409 if (xor_chain_ptr == 0x0) {
410 DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__));
411 return MV_BAD_PARAM;
412 }
413
414 /* read configuration register and mask the operation mode field */
415 temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
416 temp &= ~XEXCR_OPERATION_MODE_MASK;
417
418 switch (type) {
419 case MV_XOR:
420 if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
421 DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
422 __func__));
423 return MV_BAD_PARAM;
424 }
425 /* set the operation mode to XOR */
426 temp |= XEXCR_OPERATION_MODE_XOR;
427 break;
428 case MV_DMA:
429 if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
430 DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
431 __func__));
432 return MV_BAD_PARAM;
433 }
434 /* set the operation mode to DMA */
435 temp |= XEXCR_OPERATION_MODE_DMA;
436 break;
437 case MV_CRC32:
438 if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
439 DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
440 __func__));
441 return MV_BAD_PARAM;
442 }
443 /* set the operation mode to CRC32 */
444 temp |= XEXCR_OPERATION_MODE_CRC;
445 break;
446 default:
447 return MV_BAD_PARAM;
448 }
449
450 /* write the operation mode to the register */
451 reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
452 /*
453 * update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
454 * Pointer Register (XExNDPR)
455 */
456 reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
457 xor_chain_ptr);
458
459 /* start transfer */
460 reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
461 XEXACTR_XESTART_MASK);
462
463 return MV_OK;
464}