// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"

#ifdef DEBUG
# undef CONFIG_LOGLEVEL
# define CONFIG_LOGLEVEL 8
#endif

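/*
 * Module-level state: the list of probed BCH PF devices plus the PF/VF
 * handles that are handed over to the NAND driver once both functions have
 * completed their probes.
 */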
LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;

static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
	int ret;

	ret = pci_sriov_init(dev, nr_virtfn);
	if (ret)
		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
		       dev->name, ret);
	return ret;
}

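/*
 * The accessors below hand the probed PF/VF state to the OcteonTX NAND
 * driver: the VF probe publishes its state with octeontx_bch_putv(), and
 * octeontx_bch_getv() only returns it once both the PF and the VF have been
 * initialized.
 */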
void *octeontx_bch_getv(void)
{
	if (!bch_vf)
		return NULL;
	if (bch_vf_initialized && bch_pf_initialized)
		return bch_vf;
	else
		return NULL;
}

void octeontx_bch_putv(void *token)
{
	bch_vf_initialized = !!token;
	bch_vf = token;
}

void *octeontx_bch_getp(void)
{
	return token;
}

void octeontx_bch_putp(void *token)
{
	bch_pf = token;
	bch_pf_initialized = !!token;
}

static int do_bch_init(struct bch_device *bch)
{
	return 0;
}

static void bch_reset(struct bch_device *bch)
{
	writeq(1, bch->reg_base + BCH_CTL);
	mdelay(2);
}

static void bch_disable(struct bch_device *bch)
{
	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
	bch_reset(bch);
}

static u32 bch_check_bist_status(struct bch_device *bch)
{
	return readq(bch->reg_base + BCH_BIST_RESULT);
}

static int bch_device_init(struct bch_device *bch)
{
	u64 bist;
	int rc;

	debug("%s: Resetting...\n", __func__);
	/* Reset the PF when probed first */
	bch_reset(bch);

	debug("%s: Checking BIST...\n", __func__);
	/* Check BIST status */
	bist = (u64)bch_check_bist_status(bch);
	if (bist) {
		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
		return -ENODEV;
	}

	/* Get max VQs/VFs supported by the device */

	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
	debug("%s: %d vfs\n", __func__, bch->max_vfs);
	if (num_vfs > bch->max_vfs) {
		dev_warn(bch->dev, "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
			 num_vfs, bch->max_vfs);
		num_vfs = bch->max_vfs;
	}
	bch->vfs_enabled = bch->max_vfs;
	/* Get number of VQs/VFs to be enabled */
	/* TODO: Get CLK frequency */
	/* Reset device parameters */

	debug("%s: Doing initialization\n", __func__);
	rc = do_bch_init(bch);

	return rc;
}

static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
	struct bch_device *bch = dev_get_priv(dev);
	int ret = -EBUSY;

	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
	if (bch->vfs_in_use)
		goto exit;

	ret = 0;

	if (numvfs > 0) {
		debug("%s: Enabling sriov\n", __func__);
		ret = pci_enable_sriov(dev, numvfs);
		if (ret == 0) {
			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
			ret = numvfs;
			bch->vfs_enabled = numvfs;
		}
	}

	debug("VFs enabled: %d\n", ret);
exit:
	debug("%s: Returning %d\n", __func__, ret);
	return ret;
}

static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
	struct bch_device *bch;
	int ret;

	debug("%s(%s)\n", __func__, dev->name);
	bch = dev_get_priv(dev);
	if (!bch)
		return -ENOMEM;

	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
	bch->dev = dev;

	debug("%s: base address: %p\n", __func__, bch->reg_base);
	ret = bch_device_init(bch);
	if (ret) {
		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
		return ret;
	}
	INIT_LIST_HEAD(&bch->list);
	list_add(&bch->list, &octeontx_bch_devices);
	token = (void *)dev;

	debug("%s: Configuring SRIOV\n", __func__);
	bch_sriov_configure(dev, num_vfs);
	debug("%s: Done.\n", __func__);
	octeontx_bch_putp(bch);

	return 0;
}

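/* PCI IDs that bind the BCH physical- and virtual-function drivers */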
static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
	{},
};

static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF) },
	{},
};

/**
 * Given a data block calculate the ECC data and fill in the response
 *
 * @param vf		Pointer to the BCH virtual-function state used to
 *			submit the command
 * @param[in] block	8-byte aligned pointer to data block to calculate ECC
 * @param block_size	Size of block in bytes, must be a multiple of two.
 * @param bch_level	Number of errors that must be corrected.  The number
 *			of parity bytes is equal to ((15 * bch_level) + 7) / 8.
 *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] ecc	8-byte aligned pointer to where ecc data should go
 * @param[in] resp	pointer to where responses will be written.
 *
 * @return Zero on success, negative on failure.
 */
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_gen;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = ecc;
	cmd.s.iword.ptr = block;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);

	return 0;
}

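/*
 * Example usage (illustrative sketch only): a caller such as the OcteonTX
 * NAND driver would map its data, ECC and response buffers for DMA, submit
 * the command and then poll for completion.  The buffer setup below is an
 * assumption for illustration, not code taken from this driver:
 *
 *	struct bch_vf *vf = octeontx_bch_getv();
 *	union bch_resp *resp;	// 8-byte aligned, DMA-able (allocation not shown)
 *	dma_addr_t dhandle, ehandle, rhandle;
 *
 *	memset(resp, 0, sizeof(*resp));
 *	dhandle = dma_map_single(data, block_size, DMA_TO_DEVICE);
 *	ehandle = dma_map_single(ecc, ecc_bytes, DMA_FROM_DEVICE);
 *	rhandle = dma_map_single(resp, sizeof(*resp), DMA_BIDIRECTIONAL);
 *	if (!octeontx_bch_encode(vf, dhandle, block_size, bch_level,
 *				 ehandle, rhandle))
 *		octeontx_bch_wait(vf, resp, rhandle);
 */
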
/**
 * Given a data block and ECC data correct the data block
 *
 * @param vf		Pointer to the BCH virtual-function state used to
 *			submit the command
 * @param[in] block_ecc_in	8-byte aligned pointer to data block with ECC
 *				data concatenated to the end to correct
 * @param block_size	Size of block in bytes, must be a multiple of two.
 * @param bch_level	Number of errors that must be corrected.  The number
 *			of parity bytes is equal to ((15 * bch_level) + 7) / 8.
 *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] block_out	8-byte aligned pointer to corrected data buffer.
 *				This should not be the same as block_ecc_in.
 * @param[in] resp	pointer to where responses will be written.
 *
 * @return Zero on success, negative on failure.
 */
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
			u16 block_size, u8 bch_level,
			dma_addr_t block_out, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_correct;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = block_out;
	cmd.s.iword.ptr = block_ecc_in;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);
	return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);

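/*
 * Poll the response word, which the BCH engine updates via DMA, for up to
 * 10 ms before giving up.
 */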
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
		      dma_addr_t handle)
{
	ulong start = get_timer(0);

	__iormb(); /* HW is updating *resp */
	while (!resp->s.done && get_timer(start) < 10)
		__iormb(); /* HW is updating *resp */

	if (resp->s.done)
		return 0;

	return -ETIMEDOUT;
}

struct bch_q octeontx_bch_q[QID_MAX];

static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
					 int max_depth, int fpa_pool,
					 int pool_size)
{
	/* some params are for later merge with CPT or cn83xx */
	struct bch_q *q = &octeontx_bch_q[queue_id];
	unsigned long paddr;
	u64 *chunk_buffer;
	int chunk = max_depth + 1;
	int i, size;

	if ((unsigned int)queue_id >= QID_MAX)
		return -EINVAL;
	if (max_depth & chunk)	/* must be 2^N - 1 */
		return -EINVAL;

	size = NQS * chunk * sizeof(u64);
	chunk_buffer = dma_alloc_coherent(size, &paddr);
	if (!chunk_buffer)
		return -ENOMEM;

	q->base_paddr = paddr;
	q->dev = dev;
	q->index = 0;
	q->max_depth = max_depth;
	q->pool_size_m1 = pool_size;
	q->base_vaddr = chunk_buffer;

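	/*
	 * Link the chunks into a ring: the last 64-bit word of each chunk
	 * holds the physical address of the next chunk, so the hardware can
	 * follow the command queue across chunk boundaries.
	 */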
	for (i = 0; i < NQS; i++) {
		u64 *ixp;
		int inext = (i + 1) * chunk - 1;
		int j = (i + 1) % NQS;
		int jnext = j * chunk;
		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);

		ixp = &chunk_buffer[inext];
		*ixp = jbase;
	}

	return 0;
}

static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
	struct bch_vf *vf;
	union bch_vqx_ctl ctl;
	union bch_vqx_cmd_buf cbuf;
	int err;

	debug("%s(%s)\n", __func__, dev->name);
	vf = dev_get_priv(dev);
	if (!vf)
		return -ENOMEM;

	vf->dev = dev;

	/* Map the VF's configuration registers */
	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
	debug("%s: reg base: %p\n", __func__, vf->reg_base);

	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
					    sizeof(union bch_cmd) * QDEPTH);
	if (err) {
		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
		goto release;
	}

	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));

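	/*
	 * Program the command-buffer control word (buffer hints and queue
	 * size), restore the VQ control register and point the hardware at
	 * the command queue allocated above.
	 */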
	cbuf.u = 0;
	cbuf.s.ldwb = 1;
	cbuf.s.dfb = 1;
	cbuf.s.size = QDEPTH;
	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));

	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));

	writeq(octeontx_bch_q[QID_BCH].base_paddr,
	       vf->reg_base + BCH_VQX_CMD_PTR(0));

	octeontx_bch_putv(vf);

	debug("%s: bch vf initialization complete\n", __func__);

	if (octeontx_bch_getv())
		return octeontx_pci_nand_deferred_probe();

	return -1;

release:
	return err;
}

static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
	struct bch_device *bch = dev_get_priv(dev);

	bch_disable(bch);
	return 0;
}

U_BOOT_DRIVER(octeontx_pci_bchpf) = {
	.name = BCHPF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchpf_probe,
	.remove = octeontx_pci_bchpf_remove,
	.priv_auto = sizeof(struct bch_device),
	.flags = DM_FLAG_OS_PREPARE,
};

U_BOOT_DRIVER(octeontx_pci_bchvf) = {
	.name = BCHVF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchvf_probe,
	.priv_auto = sizeof(struct bch_vf),
};

U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);