// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"

#ifdef DEBUG
# undef CONFIG_LOGLEVEL
# define CONFIG_LOGLEVEL 8
#endif

LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;

static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
	int ret;

	ret = pci_sriov_init(dev, nr_virtfn);
	if (ret)
		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
		       dev->name, ret);
	return ret;
}

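/*
 * The PF and VF probes run independently: the PF probe publishes its
 * device via octeontx_bch_putp(), the VF probe publishes its queue via
 * octeontx_bch_putv(), and octeontx_bch_getv() only hands out the VF
 * handle once both halves have initialized. The NAND driver checks
 * octeontx_bch_getv() to decide whether the ECC engine is usable yet.
 */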
void *octeontx_bch_getv(void)
{
	if (bch_vf && bch_vf_initialized && bch_pf_initialized)
		return bch_vf;

	return NULL;
}

void octeontx_bch_putv(void *token)
{
	bch_vf_initialized = !!token;
	bch_vf = token;
}

void *octeontx_bch_getp(void)
{
	return token;
}

void octeontx_bch_putp(void *token)
{
	bch_pf = token;
	bch_pf_initialized = !!token;
}

/* Placeholder for further PF initialization */
static int do_bch_init(struct bch_device *bch)
{
	return 0;
}

static void bch_reset(struct bch_device *bch)
{
	writeq(1, bch->reg_base + BCH_CTL);
	mdelay(2);
}

static void bch_disable(struct bch_device *bch)
{
	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
	bch_reset(bch);
}

/* Return the full 64-bit BIST result rather than truncating it to 32 bits */
static u64 bch_check_bist_status(struct bch_device *bch)
{
	return readq(bch->reg_base + BCH_BIST_RESULT);
}

static int bch_device_init(struct bch_device *bch)
{
	u64 bist;
	int rc;

	debug("%s: Resetting...\n", __func__);
	/* Reset the PF when probed first */
	bch_reset(bch);

	debug("%s: Checking BIST...\n", __func__);
	/* Check BIST status */
	bist = bch_check_bist_status(bch);
	if (bist) {
		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
		return -ENODEV;
	}

	/* Get max VQs/VFs supported by the device */
	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
	debug("%s: %d vfs\n", __func__, bch->max_vfs);
	if (num_vfs > bch->max_vfs) {
		dev_warn(bch->dev,
			 "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
			 num_vfs, bch->max_vfs);
		num_vfs = bch->max_vfs;
	}
	bch->vfs_enabled = bch->max_vfs;
	/* TODO: Get CLK frequency */

	debug("%s: Doing initialization\n", __func__);
	rc = do_bch_init(bch);

	return rc;
}

static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
	struct bch_device *bch = dev_get_priv(dev);
	int ret = -EBUSY;

	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
	if (bch->vfs_in_use)
		goto exit;

	ret = 0;

	if (numvfs > 0) {
		debug("%s: Enabling sriov\n", __func__);
		ret = pci_enable_sriov(dev, numvfs);
		if (ret == 0) {
			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
			ret = numvfs;
			bch->vfs_enabled = numvfs;
		}
	}

	debug("VFs enabled: %d\n", ret);
exit:
	debug("%s: Returning %d\n", __func__, ret);
	return ret;
}

static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
	struct bch_device *bch;
	int ret;

	debug("%s(%s)\n", __func__, dev->name);
	bch = dev_get_priv(dev);
	if (!bch)
		return -ENOMEM;

	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				       PCI_REGION_TYPE, PCI_REGION_MEM);
	bch->dev = dev;

	debug("%s: base address: %p\n", __func__, bch->reg_base);
	ret = bch_device_init(bch);
	if (ret) {
		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
		return ret;
	}
	INIT_LIST_HEAD(&bch->list);
	list_add(&bch->list, &octeontx_bch_devices);
	token = (void *)dev;

	debug("%s: Configuring SRIOV\n", __func__);
	bch_sriov_configure(dev, num_vfs);
	debug("%s: Done.\n", __func__);
	octeontx_bch_putp(bch);

	return 0;
}

static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
	{},
};

static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF) },
	{},
};

/**
 * Given a data block calculate the ecc data and fill in the response
 *
 * @param vf pointer to the BCH virtual function the command is queued on
 * @param[in] block 8-byte aligned pointer to data block to calculate ECC
 * @param block_size Size of block in bytes, must be a multiple of two.
 * @param bch_level Number of errors that must be corrected. The number of
 *		    parity bytes is equal to ((15 * bch_level) + 7) / 8.
 *		    Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] ecc 8-byte aligned pointer to where ecc data should go
 * @param[in] resp pointer to where responses will be written.
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_gen;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = ecc;
	cmd.s.iword.ptr = block;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);

	return 0;
}

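/*
 * Illustrative sketch only, not called by the driver: one way a caller
 * might pair octeontx_bch_encode() with octeontx_bch_wait() (prototypes
 * assumed to come from octeontx_bch.h). @resp and @resp_handle must
 * refer to the same 8-byte aligned memory, and all buffers must stay
 * DMA-visible until the command completes.
 */
static int __maybe_unused bch_encode_and_wait(struct bch_vf *vf,
					      dma_addr_t block,
					      u16 block_size, u8 bch_level,
					      dma_addr_t ecc,
					      union bch_resp *resp,
					      dma_addr_t resp_handle)
{
	int rc;

	/* Clear the done bit so the poll below only sees the HW update */
	memset(resp, 0, sizeof(*resp));

	rc = octeontx_bch_encode(vf, block, block_size, bch_level, ecc,
				 resp_handle);
	if (rc)
		return rc;

	return octeontx_bch_wait(vf, resp, resp_handle);
}
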
/**
 * Given a data block and ecc data correct the data block
 *
 * @param vf pointer to the BCH virtual function the command is
 *			   queued on
 * @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
 *			   data concatenated to the end to correct
 * @param block_size Size of block in bytes, must be a multiple of
 *		     two.
 * @param bch_level Number of errors that must be corrected. The
 *		    number of parity bytes is equal to
 *		    ((15 * bch_level) + 7) / 8.
 *		    Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] block_out 8-byte aligned pointer to corrected data buffer.
 *			 This should not be the same as block_ecc_in.
 * @param[in] resp pointer to where responses will be written.
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
			u16 block_size, u8 bch_level,
			dma_addr_t block_out, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_correct;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = block_out;
	cmd.s.iword.ptr = block_ecc_in;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);
	return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);

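/*
 * Illustrative sketch only, not called by the driver: the decode input
 * is the data block with its parity bytes appended, so a caller keeping
 * data and ECC apart must concatenate them first. The coherent buffer
 * below is an assumption for illustration; it is deliberately not freed
 * here because it must outlive the asynchronous command.
 */
static int __maybe_unused bch_decode_separate_ecc(struct bch_vf *vf,
						  const void *data,
						  const void *ecc,
						  u16 block_size,
						  u8 bch_level,
						  dma_addr_t block_out,
						  dma_addr_t resp)
{
	int ecc_bytes = (15 * bch_level + 7) / 8;
	unsigned long paddr;
	u8 *in;

	in = dma_alloc_coherent(block_size + ecc_bytes, &paddr);
	if (!in)
		return -ENOMEM;

	/* Build the hardware's expected layout: data first, then parity */
	memcpy(in, data, block_size);
	memcpy(in + block_size, ecc, ecc_bytes);

	return octeontx_bch_decode(vf, (dma_addr_t)paddr, block_size,
				   bch_level, block_out, resp);
}
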
/**
 * Poll a response word until hardware sets the done bit
 *
 * @param vf virtual function handle the command was queued on
 * @param[in] resp pointer to the response word the hardware updates
 * @param handle bus address of the response (unused here, kept for
 *		 symmetry with the submit functions)
 *
 * Return: Zero on success, -ETIMEDOUT if the done bit is not set
 *	   within 10ms.
 */
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
		      dma_addr_t handle)
{
	ulong start = get_timer(0);

	__iormb(); /* HW is updating *resp */
	while (!resp->s.done && get_timer(start) < 10)
		__iormb(); /* HW is updating *resp */

	if (resp->s.done)
		return 0;

	return -ETIMEDOUT;
}

struct bch_q octeontx_bch_q[QID_MAX];

static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
					 int max_depth, int fpa_pool,
					 int pool_size)
{
	/* Some params are for later merge with CPT or cn83xx */
	struct bch_q *q = &octeontx_bch_q[queue_id];
	unsigned long paddr;
	u64 *chunk_buffer;
	int chunk = max_depth + 1;
	int i, size;

	if ((unsigned int)queue_id >= QID_MAX)
		return -EINVAL;
	if (max_depth & chunk) /* must be 2^N - 1 */
		return -EINVAL;

	size = NQS * chunk * sizeof(u64);
	chunk_buffer = dma_alloc_coherent(size, &paddr);
	if (!chunk_buffer)
		return -ENOMEM;

	q->base_paddr = paddr;
	q->dev = dev;
	q->index = 0;
	q->max_depth = max_depth;
	q->pool_size_m1 = pool_size;
	q->base_vaddr = chunk_buffer;

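	/*
	 * Chain the NQS chunks into a ring: the last u64 of chunk i holds
	 * the bus address of the first entry of chunk (i + 1) % NQS, so
	 * the engine can follow the link when it exhausts a chunk.
	 */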
	for (i = 0; i < NQS; i++) {
		u64 *ixp;
		int inext = (i + 1) * chunk - 1;
		int j = (i + 1) % NQS;
		int jnext = j * chunk;
		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);

		ixp = &chunk_buffer[inext];
		*ixp = jbase;
	}

	return 0;
}

static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
	struct bch_vf *vf;
	union bch_vqx_ctl ctl;
	union bch_vqx_cmd_buf cbuf;
	int err;

	debug("%s(%s)\n", __func__, dev->name);
	vf = dev_get_priv(dev);
	if (!vf)
		return -ENOMEM;

	vf->dev = dev;

	/* Map the VF's configuration registers */
	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				      PCI_REGION_TYPE, PCI_REGION_MEM);
	debug("%s: reg base: %p\n", __func__, vf->reg_base);

	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
					    sizeof(union bch_cmd) * QDEPTH);
	if (err) {
		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
		goto release;
	}

	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));

	cbuf.u = 0;
	cbuf.s.ldwb = 1;
	cbuf.s.dfb = 1;
	cbuf.s.size = QDEPTH;
	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));

	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));

	writeq(octeontx_bch_q[QID_BCH].base_paddr,
	       vf->reg_base + BCH_VQX_CMD_PTR(0));

	octeontx_bch_putv(vf);

	debug("%s: bch vf initialization complete\n", __func__);

	if (octeontx_bch_getv())
		return octeontx_pci_nand_deferred_probe();

	return -ENODEV;

release:
	return err;
}

static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
	struct bch_device *bch = dev_get_priv(dev);

	bch_disable(bch);
	return 0;
}

U_BOOT_DRIVER(octeontx_pci_bchpf) = {
	.name = BCHPF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchpf_probe,
	.remove = octeontx_pci_bchpf_remove,
	.priv_auto = sizeof(struct bch_device),
	.flags = DM_FLAG_OS_PREPARE,
};

U_BOOT_DRIVER(octeontx_pci_bchvf) = {
	.name = BCHVF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchvf_probe,
	.priv_auto = sizeof(struct bch_vf),
};

U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);