// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Álvaro Fernández Rojas <noltari@gmail.com>
 *
 * Derived from linux/drivers/spi/spi-bcm63xx-hsspi.c:
 *	Copyright (C) 2000-2010 Broadcom Corporation
 *	Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <spi.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <linux/bitops.h>

#define HSSPI_PP			0

/*
 * The maximum frequency for SPI synchronous mode is 30MHz for some chips and
 * 25MHz for others. This depends on the chip layout and the distance from the
 * SPI signals to the pad. Use the lower of these values to cover all relevant
 * chips.
 */
#define SPI_MAX_SYNC_CLOCK		25000000

/* SPI Control register */
#define SPI_CTL_REG			0x000
#define SPI_CTL_CS_POL_SHIFT		0
#define SPI_CTL_CS_POL_MASK		(0xff << SPI_CTL_CS_POL_SHIFT)
#define SPI_CTL_CLK_GATE_SHIFT		16
#define SPI_CTL_CLK_GATE_MASK		(1 << SPI_CTL_CLK_GATE_SHIFT)
#define SPI_CTL_CLK_POL_SHIFT		17
#define SPI_CTL_CLK_POL_MASK		(1 << SPI_CTL_CLK_POL_SHIFT)

/* SPI Interrupts registers */
#define SPI_IR_STAT_REG			0x008
#define SPI_IR_ST_MASK_REG		0x00c
#define SPI_IR_MASK_REG			0x010

#define SPI_IR_CLEAR_ALL		0xff001f1f

/* SPI Ping-Pong Command registers */
#define SPI_CMD_REG			(0x080 + (0x40 * (HSSPI_PP)) + 0x00)
#define SPI_CMD_OP_SHIFT		0
#define SPI_CMD_OP_START		(0x1 << SPI_CMD_OP_SHIFT)
#define SPI_CMD_PFL_SHIFT		8
#define SPI_CMD_PFL_MASK		(0x7 << SPI_CMD_PFL_SHIFT)
#define SPI_CMD_SLAVE_SHIFT		12
#define SPI_CMD_SLAVE_MASK		(0x7 << SPI_CMD_SLAVE_SHIFT)

/* SPI Ping-Pong Status registers */
#define SPI_STAT_REG			(0x080 + (0x40 * (HSSPI_PP)) + 0x04)
#define SPI_STAT_SRCBUSY_SHIFT		1
#define SPI_STAT_SRCBUSY_MASK		(1 << SPI_STAT_SRCBUSY_SHIFT)

/* SPI Profile Clock registers */
#define SPI_PFL_CLK_REG(x)		(0x100 + (0x20 * (x)) + 0x00)
#define SPI_PFL_CLK_FREQ_SHIFT		0
#define SPI_PFL_CLK_FREQ_MASK		(0x3fff << SPI_PFL_CLK_FREQ_SHIFT)
#define SPI_PFL_CLK_RSTLOOP_SHIFT	15
#define SPI_PFL_CLK_RSTLOOP_MASK	(1 << SPI_PFL_CLK_RSTLOOP_SHIFT)

/* SPI Profile Signal registers */
#define SPI_PFL_SIG_REG(x)		(0x100 + (0x20 * (x)) + 0x04)
#define SPI_PFL_SIG_LATCHRIS_SHIFT	12
#define SPI_PFL_SIG_LATCHRIS_MASK	(1 << SPI_PFL_SIG_LATCHRIS_SHIFT)
#define SPI_PFL_SIG_LAUNCHRIS_SHIFT	13
#define SPI_PFL_SIG_LAUNCHRIS_MASK	(1 << SPI_PFL_SIG_LAUNCHRIS_SHIFT)
#define SPI_PFL_SIG_ASYNCIN_SHIFT	16
#define SPI_PFL_SIG_ASYNCIN_MASK	(1 << SPI_PFL_SIG_ASYNCIN_SHIFT)

/* SPI Profile Mode registers */
#define SPI_PFL_MODE_REG(x)		(0x100 + (0x20 * (x)) + 0x08)
#define SPI_PFL_MODE_FILL_SHIFT		0
#define SPI_PFL_MODE_FILL_MASK		(0xff << SPI_PFL_MODE_FILL_SHIFT)
#define SPI_PFL_MODE_MDRDST_SHIFT	8
#define SPI_PFL_MODE_MDWRST_SHIFT	12
#define SPI_PFL_MODE_MDRDSZ_SHIFT	16
#define SPI_PFL_MODE_MDRDSZ_MASK	(1 << SPI_PFL_MODE_MDRDSZ_SHIFT)
#define SPI_PFL_MODE_MDWRSZ_SHIFT	18
#define SPI_PFL_MODE_MDWRSZ_MASK	(1 << SPI_PFL_MODE_MDWRSZ_SHIFT)
#define SPI_PFL_MODE_3WIRE_SHIFT	20
#define SPI_PFL_MODE_3WIRE_MASK		(1 << SPI_PFL_MODE_3WIRE_SHIFT)
#define SPI_PFL_MODE_PREPCNT_SHIFT	24
#define SPI_PFL_MODE_PREPCNT_MASK	(0xf << SPI_PFL_MODE_PREPCNT_SHIFT)

/* SPI Ping-Pong FIFO registers */
#define HSSPI_FIFO_SIZE			0x200
#define HSSPI_FIFO_BASE			(0x200 + \
					 (HSSPI_FIFO_SIZE * HSSPI_PP))

/* SPI Ping-Pong FIFO OP register */
#define HSSPI_FIFO_OP_SIZE		0x2
#define HSSPI_FIFO_OP_REG		(HSSPI_FIFO_BASE + 0x00)
#define HSSPI_FIFO_OP_BYTES_SHIFT	0
#define HSSPI_FIFO_OP_BYTES_MASK	(0x3ff << HSSPI_FIFO_OP_BYTES_SHIFT)
#define HSSPI_FIFO_OP_MBIT_SHIFT	11
#define HSSPI_FIFO_OP_MBIT_MASK		(1 << HSSPI_FIFO_OP_MBIT_SHIFT)
#define HSSPI_FIFO_OP_CODE_SHIFT	13
#define HSSPI_FIFO_OP_READ_WRITE	(1 << HSSPI_FIFO_OP_CODE_SHIFT)
#define HSSPI_FIFO_OP_CODE_W		(2 << HSSPI_FIFO_OP_CODE_SHIFT)
#define HSSPI_FIFO_OP_CODE_R		(3 << HSSPI_FIFO_OP_CODE_SHIFT)
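
/*
 * Editor note on the 16-bit FIFO opcode word written to HSSPI_FIFO_OP_REG,
 * as implied by the masks above: bits [9:0] hold the transfer byte count,
 * bit 11 selects multi-bit (dual) I/O and bits [15:13] hold the operation
 * code. For example, a 12-byte single-bit read would presumably be encoded
 * as HSSPI_FIFO_OP_CODE_R | 12 = 0x600c, written big-endian by the xfer
 * helpers below.
 */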

#define HSSPI_MAX_DATA_SIZE		(HSSPI_FIFO_SIZE - HSSPI_FIFO_OP_SIZE)
#define HSSPI_MAX_PREPEND_SIZE		15

#define HSSPI_XFER_MODE_PREPEND		0
#define HSSPI_XFER_MODE_DUMMYCS		1
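
/*
 * Transfer mode selection (see bcm63xx_hsspi_xfer()): each message starts in
 * PREPEND mode, where half-duplex writes are buffered so they can be
 * prepended to the final read/write and issued as a single HW operation with
 * CS handled by the controller. If the message does not fit the prepend
 * constraints, the driver falls back to DUMMYCS mode, which uses the CS
 * polarity inversion trick described above bcm63xx_hsspi_xfer_dummy_cs().
 */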

struct bcm63xx_hsspi_priv {
	void __iomem *regs;
	ulong clk_rate;
	uint8_t num_cs;
	uint8_t cs_pols;
	uint speed;
	uint xfer_mode;
	uint32_t prepend_cnt;
	uint8_t prepend_buf[HSSPI_MAX_PREPEND_SIZE];
};

static int bcm63xx_hsspi_cs_info(struct udevice *bus, uint cs,
				 struct spi_cs_info *info)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);

	if (cs >= priv->num_cs) {
		printf("no cs %u\n", cs);
		return -EINVAL;
	}

	return 0;
}

static int bcm63xx_hsspi_set_mode(struct udevice *bus, uint mode)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);

	/* clock polarity */
	if (mode & SPI_CPOL)
		setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);
	else
		clrbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);

	return 0;
}

static int bcm63xx_hsspi_set_speed(struct udevice *bus, uint speed)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);

	priv->speed = speed;

	return 0;
}

static void bcm63xx_hsspi_activate_cs(struct bcm63xx_hsspi_priv *priv,
				      struct dm_spi_slave_plat *plat)
{
	uint32_t clr, set;
	uint speed = priv->speed;

	if (priv->xfer_mode == HSSPI_XFER_MODE_DUMMYCS &&
	    speed > SPI_MAX_SYNC_CLOCK) {
		speed = SPI_MAX_SYNC_CLOCK;
		debug("Forced to dummy cs mode, reducing speed to %uHz\n", speed);
	}

	/* profile clock */
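	/*
	 * Editor note: the FREQ field appears to encode the ratio
	 * 2048 / divider, where divider = ceil(clk_rate / speed), so the
	 * resulting serial clock is roughly clk_rate * FREQ / 2048 (the same
	 * computation as the Linux spi-bcm63xx-hsspi driver this code is
	 * derived from).
	 */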
	set = DIV_ROUND_UP(priv->clk_rate, speed);
	set = DIV_ROUND_UP(2048, set);
	set &= SPI_PFL_CLK_FREQ_MASK;
	set |= SPI_PFL_CLK_RSTLOOP_MASK;
	writel(set, priv->regs + SPI_PFL_CLK_REG(plat->cs));

	/* profile signal */
	set = 0;
	clr = SPI_PFL_SIG_LAUNCHRIS_MASK |
	      SPI_PFL_SIG_LATCHRIS_MASK |
	      SPI_PFL_SIG_ASYNCIN_MASK;

	/* latch/launch config */
	if (plat->mode & SPI_CPHA)
		set |= SPI_PFL_SIG_LAUNCHRIS_MASK;
	else
		set |= SPI_PFL_SIG_LATCHRIS_MASK;

	/* async clk */
	if (speed > SPI_MAX_SYNC_CLOCK)
		set |= SPI_PFL_SIG_ASYNCIN_MASK;

	clrsetbits_32(priv->regs + SPI_PFL_SIG_REG(plat->cs), clr, set);

	/* global control */
	set = 0;
	clr = 0;

	if (priv->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
		if (priv->cs_pols & BIT(plat->cs))
			set |= BIT(plat->cs);
		else
			clr |= BIT(plat->cs);
	} else {
		/* invert cs polarity */
		if (priv->cs_pols & BIT(plat->cs))
			clr |= BIT(plat->cs);
		else
			set |= BIT(plat->cs);

		/* invert dummy cs polarity */
		if (priv->cs_pols & BIT(!plat->cs))
			clr |= BIT(!plat->cs);
		else
			set |= BIT(!plat->cs);
	}

	clrsetbits_32(priv->regs + SPI_CTL_REG, clr, set);
}

static void bcm63xx_hsspi_deactivate_cs(struct bcm63xx_hsspi_priv *priv)
{
	/* restore cs polarities */
	clrsetbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CS_POL_MASK,
		      priv->cs_pols);
}

/*
 * The BCM63xx HSSPI controller doesn't allow keeping CS active between
 * transfers because the CS lines are controlled by HW.
 * However, it provides a mechanism to prepend write transfers prior to read
 * transfers (with a maximum prepend of 15 bytes), which is usually enough for
 * SPI-connected flashes since reading requires prepending a write transfer of
 * 5 bytes. On the other hand, it also provides a way to invert each CS
 * polarity, not only between transfers like the older BCM63xx SPI driver, but
 * also for the rest of the time.
 *
 * Instead of using the prepend mechanism, this implementation inverts the
 * polarity of both the desired CS and another dummy CS when the bus is
 * claimed. This way, the dummy CS is restored to its inactive value when
 * transfers are issued and the desired CS is preserved at its active value
 * all the time. This hack is also used in the upstream Linux driver and
 * allows keeping CS active between transfers even though the HW doesn't
 * support it natively.
 *
 * If the SPI clock is faster than SPI_MAX_SYNC_CLOCK, this workaround only
 * works when the dummy CS (usually CS1 when the actual CS is 0) is pinmuxed
 * to the SPI chip select function. On older Broadcom chips the CS1 pin
 * defaults to the chip select function, but this is not the case for newer
 * chips. To make this function always work, it should be called with a
 * maximum clock of SPI_MAX_SYNC_CLOCK.
 */
static int bcm63xx_hsspi_xfer_dummy_cs(struct udevice *dev, unsigned int data_bytes,
				       const void *dout, void *din, unsigned long flags)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
	struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
	size_t step_size = HSSPI_FIFO_SIZE;
	uint16_t opcode = 0;
	uint32_t val = SPI_PFL_MODE_FILL_MASK;
	const uint8_t *tx = dout;
	uint8_t *rx = din;

	if (flags & SPI_XFER_BEGIN)
		bcm63xx_hsspi_activate_cs(priv, plat);

	/* fifo operation */
	if (tx && rx)
		opcode = HSSPI_FIFO_OP_READ_WRITE;
	else if (rx)
		opcode = HSSPI_FIFO_OP_CODE_R;
	else if (tx)
		opcode = HSSPI_FIFO_OP_CODE_W;

	if (opcode != HSSPI_FIFO_OP_CODE_R)
		step_size -= HSSPI_FIFO_OP_SIZE;

	/* dual mode */
	if ((opcode == HSSPI_FIFO_OP_CODE_R && (plat->mode & SPI_RX_DUAL)) ||
	    (opcode == HSSPI_FIFO_OP_CODE_W && (plat->mode & SPI_TX_DUAL))) {
		opcode |= HSSPI_FIFO_OP_MBIT_MASK;

		/* profile mode */
		if (plat->mode & SPI_RX_DUAL)
			val |= SPI_PFL_MODE_MDRDSZ_MASK;
		if (plat->mode & SPI_TX_DUAL)
			val |= SPI_PFL_MODE_MDWRSZ_MASK;
	}

	if (plat->mode & SPI_3WIRE)
		val |= SPI_PFL_MODE_3WIRE_MASK;
	writel(val, priv->regs + SPI_PFL_MODE_REG(plat->cs));

	/* transfer loop */
	while (data_bytes > 0) {
		size_t curr_step = min(step_size, (size_t)data_bytes);
		int ret;

		/* copy tx data */
		if (tx) {
			memcpy_toio(priv->regs + HSSPI_FIFO_BASE +
				    HSSPI_FIFO_OP_SIZE, tx, curr_step);
			tx += curr_step;
		}

		/* set fifo operation */
		writew(cpu_to_be16(opcode | (curr_step & HSSPI_FIFO_OP_BYTES_MASK)),
		       priv->regs + HSSPI_FIFO_OP_REG);

		/* issue the transfer */
		val = SPI_CMD_OP_START;
		val |= (plat->cs << SPI_CMD_PFL_SHIFT) &
		       SPI_CMD_PFL_MASK;
		val |= (!plat->cs << SPI_CMD_SLAVE_SHIFT) &
		       SPI_CMD_SLAVE_MASK;
		writel(val, priv->regs + SPI_CMD_REG);

		/* wait for completion */
		ret = wait_for_bit_32(priv->regs + SPI_STAT_REG,
				      SPI_STAT_SRCBUSY_MASK, false,
				      1000, false);
		if (ret) {
			printf("interrupt timeout\n");
			return ret;
		}

		/* copy rx data */
		if (rx) {
			memcpy_fromio(rx, priv->regs + HSSPI_FIFO_BASE,
				      curr_step);
			rx += curr_step;
		}

		data_bytes -= curr_step;
	}

	if (flags & SPI_XFER_END)
		bcm63xx_hsspi_deactivate_cs(priv);

	return 0;
}

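/*
 * Decide whether the current chunk of a message can still be handled in
 * prepend mode. Half-duplex writes without SPI_XFER_END are buffered in
 * priv->prepend_buf, and the final read and/or write must fit in the
 * ping-pong FIFO. Returns the transfer mode to use: HSSPI_XFER_MODE_PREPEND
 * if the data was buffered or can be sent as one prepended operation, or
 * HSSPI_XFER_MODE_DUMMYCS to request the fallback path.
 */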
static int bcm63xx_prepare_prepend_transfer(struct bcm63xx_hsspi_priv *priv,
					    unsigned int data_bytes, const void *dout, void *din,
					    unsigned long flags)
{
	/*
	 * Only multiple half-duplex write transfers plus an optional full
	 * duplex read/write at the end are supported.
	 */
	if (flags & SPI_XFER_BEGIN) {
		/* clear prepends */
		priv->prepend_cnt = 0;
	}

	if (din) {
		/* buffering reads not possible for prepend mode */
		if (!(flags & SPI_XFER_END)) {
			debug("unable to buffer reads\n");
			return HSSPI_XFER_MODE_DUMMYCS;
		}

		/* check rx size */
		if (data_bytes > HSSPI_MAX_DATA_SIZE) {
			debug("max rx bytes exceeded\n");
			return HSSPI_XFER_MODE_DUMMYCS;
		}
	}

	if (dout) {
		/* check tx size */
		if (flags & SPI_XFER_END) {
			if (priv->prepend_cnt + data_bytes > HSSPI_MAX_DATA_SIZE) {
				debug("max tx bytes exceeded\n");
				return HSSPI_XFER_MODE_DUMMYCS;
			}
		} else {
			if (priv->prepend_cnt + data_bytes > HSSPI_MAX_PREPEND_SIZE) {
				debug("max prepend bytes exceeded\n");
				return HSSPI_XFER_MODE_DUMMYCS;
			}

			/*
			 * buffer transfer data in the prepend buf in case we
			 * have to fall back to dummy cs mode.
			 */
			memcpy(&priv->prepend_buf[priv->prepend_cnt], dout, data_bytes);
			priv->prepend_cnt += data_bytes;
		}
	}

	return HSSPI_XFER_MODE_PREPEND;
}

static int bcm63xx_hsspi_xfer_prepend(struct udevice *dev, unsigned int data_bytes,
				      const void *dout, void *din, unsigned long flags)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
	struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
	uint16_t opcode = 0;
	uint32_t val, offset;
	int ret;

	if (flags & SPI_XFER_END) {
		offset = HSSPI_FIFO_BASE + HSSPI_FIFO_OP_SIZE;
		if (priv->prepend_cnt) {
			/* copy prepend data */
			memcpy_toio(priv->regs + offset,
				    priv->prepend_buf, priv->prepend_cnt);
		}

		if (dout && data_bytes) {
			/* copy tx data */
			offset += priv->prepend_cnt;
			memcpy_toio(priv->regs + offset, dout, data_bytes);
		}

		bcm63xx_hsspi_activate_cs(priv, plat);
		if (dout && !din) {
			/* all half-duplex write. merge to single write */
			data_bytes += priv->prepend_cnt;
			opcode = HSSPI_FIFO_OP_CODE_W;
			priv->prepend_cnt = 0;
		} else if (!dout && din) {
			/* half-duplex read with prepend write */
			opcode = HSSPI_FIFO_OP_CODE_R;
		} else {
			/* full duplex read/write */
			opcode = HSSPI_FIFO_OP_READ_WRITE;
		}

		/* profile mode */
		val = SPI_PFL_MODE_FILL_MASK;
		if (plat->mode & SPI_3WIRE)
			val |= SPI_PFL_MODE_3WIRE_MASK;

		/* dual mode */
		if ((opcode == HSSPI_FIFO_OP_CODE_R && (plat->mode & SPI_RX_DUAL)) ||
		    (opcode == HSSPI_FIFO_OP_CODE_W && (plat->mode & SPI_TX_DUAL))) {
			opcode |= HSSPI_FIFO_OP_MBIT_MASK;

			if (plat->mode & SPI_RX_DUAL) {
				val |= SPI_PFL_MODE_MDRDSZ_MASK;
				val |= priv->prepend_cnt << SPI_PFL_MODE_MDRDST_SHIFT;
			}
			if (plat->mode & SPI_TX_DUAL) {
				val |= SPI_PFL_MODE_MDWRSZ_MASK;
				val |= priv->prepend_cnt << SPI_PFL_MODE_MDWRST_SHIFT;
			}
		}
		val |= (priv->prepend_cnt << SPI_PFL_MODE_PREPCNT_SHIFT);
		writel(val, priv->regs + SPI_PFL_MODE_REG(plat->cs));

		/* set fifo operation */
		val = opcode | (data_bytes & HSSPI_FIFO_OP_BYTES_MASK);
		writew(cpu_to_be16(val),
		       priv->regs + HSSPI_FIFO_OP_REG);

		/* issue the transfer */
		val = SPI_CMD_OP_START;
		val |= (plat->cs << SPI_CMD_PFL_SHIFT) &
		       SPI_CMD_PFL_MASK;
		val |= (plat->cs << SPI_CMD_SLAVE_SHIFT) &
		       SPI_CMD_SLAVE_MASK;
		writel(val, priv->regs + SPI_CMD_REG);

		/* wait for completion */
		ret = wait_for_bit_32(priv->regs + SPI_STAT_REG,
				      SPI_STAT_SRCBUSY_MASK, false,
				      1000, false);
		if (ret) {
			bcm63xx_hsspi_deactivate_cs(priv);
			printf("spi polling timeout\n");
			return ret;
		}

		/* copy rx data */
		if (din)
			memcpy_fromio(din, priv->regs + HSSPI_FIFO_BASE,
				      data_bytes);
		bcm63xx_hsspi_deactivate_cs(priv);
	}

	return 0;
}

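/*
 * Top-level xfer: try to run the whole message in prepend mode and fall back
 * to dummy cs mode (flushing any already-buffered prepend data first) when
 * that is not possible. A typical SPI flash read, for instance, arrives as a
 * small command/address write without SPI_XFER_END followed by a data read
 * with SPI_XFER_END; the write is buffered in prepend_buf and both are then
 * issued as one prepended read operation.
 */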
static int bcm63xx_hsspi_xfer(struct udevice *dev, unsigned int bitlen,
			      const void *dout, void *din, unsigned long flags)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
	int ret;
	u32 data_bytes = bitlen >> 3;

	if (priv->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
		priv->xfer_mode =
			bcm63xx_prepare_prepend_transfer(priv, data_bytes, dout, din, flags);
	}

	/* if not prependable, fall back to dummy cs mode with safe clock */
	if (priv->xfer_mode == HSSPI_XFER_MODE_DUMMYCS) {
		/* For pending prepend data from previous transfers, send it first */
		if (priv->prepend_cnt) {
			bcm63xx_hsspi_xfer_dummy_cs(dev, priv->prepend_cnt,
						    priv->prepend_buf, NULL,
						    (flags & ~SPI_XFER_END) | SPI_XFER_BEGIN);
			priv->prepend_cnt = 0;
		}
		ret = bcm63xx_hsspi_xfer_dummy_cs(dev, data_bytes, dout, din, flags);
	} else {
		ret = bcm63xx_hsspi_xfer_prepend(dev, data_bytes, dout, din, flags);
	}

	if (flags & SPI_XFER_END)
		priv->xfer_mode = HSSPI_XFER_MODE_PREPEND;

	return ret;
}

static const struct dm_spi_ops bcm63xx_hsspi_ops = {
	.cs_info = bcm63xx_hsspi_cs_info,
	.set_mode = bcm63xx_hsspi_set_mode,
	.set_speed = bcm63xx_hsspi_set_speed,
	.xfer = bcm63xx_hsspi_xfer,
};

static const struct udevice_id bcm63xx_hsspi_ids[] = {
	{ .compatible = "brcm,bcm6328-hsspi", },
	{ .compatible = "brcm,bcmbca-hsspi-v1.0", },
	{ /* sentinel */ }
};

static int bcm63xx_hsspi_child_pre_probe(struct udevice *dev)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
	struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/* check cs */
	if (plat->cs >= priv->num_cs) {
		printf("no cs %u\n", plat->cs);
		return -ENODEV;
	}

	/* cs polarity */
	if (plat->mode & SPI_CS_HIGH)
		priv->cs_pols |= BIT(plat->cs);
	else
		priv->cs_pols &= ~BIT(plat->cs);

	/*
	 * Set the max read/write size to make sure each xfer is within the
	 * prepend limit.
	 */
	slave->max_read_size = HSSPI_MAX_DATA_SIZE;
	slave->max_write_size = HSSPI_MAX_DATA_SIZE;

	return 0;
}

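/*
 * The device tree node is expected to provide the register space, an "hsspi"
 * clock (enabled here) and a "pll" clock (used as the reference for the SPI
 * clock rate), an optional reset at index 0 and an optional "num-cs"
 * property (default 8), matching the lookups below.
 */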
static int bcm63xx_hsspi_probe(struct udevice *dev)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev);
	struct reset_ctl rst_ctl;
	struct clk clk;
	int ret;

	priv->regs = dev_remap_addr(dev);
	if (!priv->regs)
		return -EINVAL;

	priv->num_cs = dev_read_u32_default(dev, "num-cs", 8);

	/* enable clock */
	ret = clk_get_by_name(dev, "hsspi", &clk);
	if (ret < 0)
		return ret;

	ret = clk_enable(&clk);
	if (ret < 0 && ret != -ENOSYS)
		return ret;

	/* get clock rate */
	ret = clk_get_by_name(dev, "pll", &clk);
	if (ret < 0 && ret != -ENOSYS)
		return ret;

	priv->clk_rate = clk_get_rate(&clk);

	/* perform reset */
	ret = reset_get_by_index(dev, 0, &rst_ctl);
	if (ret >= 0) {
		ret = reset_deassert(&rst_ctl);
		if (ret < 0)
			return ret;
	}

	ret = reset_free(&rst_ctl);
	if (ret < 0)
		return ret;

	/* initialize hardware */
	writel(0, priv->regs + SPI_IR_MASK_REG);

	/* clear pending interrupts */
	writel(SPI_IR_CLEAR_ALL, priv->regs + SPI_IR_STAT_REG);

	/* enable clk gate */
	setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_GATE_MASK);

	/* read default cs polarities */
	priv->cs_pols = readl(priv->regs + SPI_CTL_REG) &
			SPI_CTL_CS_POL_MASK;

	/* default in prepend mode */
	priv->xfer_mode = HSSPI_XFER_MODE_PREPEND;

	return 0;
}

U_BOOT_DRIVER(bcm63xx_hsspi) = {
	.name = "bcm63xx_hsspi",
	.id = UCLASS_SPI,
	.of_match = bcm63xx_hsspi_ids,
	.ops = &bcm63xx_hsspi_ops,
	.priv_auto = sizeof(struct bcm63xx_hsspi_priv),
	.child_pre_probe = bcm63xx_hsspi_child_pre_probe,
	.probe = bcm63xx_hsspi_probe,
};