// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 */

#include <log.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>

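/**
 * spi_mem_exec_op() - Execute a memory operation on a non-DM SPI slave
 * @slave: the SPI slave on which to run the operation
 * @op: the memory operation to execute
 *
 * Builds a single buffer holding the opcode, the address bytes (MSB first)
 * and the dummy bytes, sends it as the first transfer, then issues the data
 * phase (if any) as a second transfer that ends the transaction.
 *
 * Illustrative sketch only (not part of this file), assuming the
 * SPI_MEM_OP_* helper macros from <spi-mem.h>: reading a 3-byte JEDEC ID
 * with the standard 0x9f opcode could look like this:
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret = spi_mem_exec_op(slave, &op);
 *
 * Return: 0 on success, a negative error code otherwise.
 */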
int spi_mem_exec_op(struct spi_slave *slave,
		    const struct spi_mem_op *op)
{
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	u8 *op_buf;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	op_buf = calloc(1, op_len);
	if (!op_buf)
		return -ENOMEM;

	ret = spi_claim_bus(slave);
	if (ret < 0) {
		free(op_buf);
		return ret;
	}

	op_buf[pos++] = op->cmd.opcode;

	/* Serialize the address MSB first */
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
					  (8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	/* Dummy cycles are clocked out as all-ones bytes */
	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);

	/* 2nd transfer: rx or tx data path */
	if (!ret && (tx_buf || rx_buf))
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, SPI_XFER_END);

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	free(op_buf);

	return ret;
}

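/**
 * spi_mem_adjust_op_size() - Clamp the data size of a memory operation
 * @slave: the SPI slave that will execute the operation
 * @op: the operation whose data.nbytes may be shrunk
 *
 * Fails with -EINVAL if the opcode, address and dummy bytes alone already
 * exceed the controller's maximum write size. Otherwise clamps data.nbytes
 * to the controller's read limit (for SPI_MEM_DATA_IN) or to whatever is
 * left of the write limit after the opcode/address/dummy bytes.
 *
 * Illustrative sketch only: a caller would typically adjust, execute and
 * advance in a loop until the whole buffer has been transferred, e.g.
 *
 *	while (len) {
 *		op.data.nbytes = len;
 *		ret = spi_mem_adjust_op_size(slave, &op);
 *		if (!ret)
 *			ret = spi_mem_exec_op(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		op.data.buf.in += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 *
 * Return: 0 on success, -EINVAL otherwise.
 */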
int spi_mem_adjust_op_size(struct spi_slave *slave,
			   struct spi_mem_op *op)
{
	unsigned int len;

	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (slave->max_write_size && len > slave->max_write_size)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_read_size);
	} else if (slave->max_write_size) {
		op->data.nbytes = min(op->data.nbytes,
				      slave->max_write_size - len);
	}

	if (!op->data.nbytes)
		return -EINVAL;

	return 0;
}

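/*
 * Check that the slave's mode flags allow @buswidth data lines in the
 * requested direction (@tx). Single-bit transfers are always allowed; dual
 * transfers are accepted when either the dual or the quad mode flag is set,
 * while quad and octal transfers require the matching SPI_TX_QUAD/SPI_RX_QUAD
 * or SPI_TX_OCTAL/SPI_RX_OCTAL flag.
 */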
static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

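/**
 * spi_mem_supports_op() - Check whether a memory operation can be executed
 * @slave: the SPI slave to check against
 * @op: the memory operation to check
 *
 * Verifies that the bus width of every phase of @op (command, address,
 * dummy, data) is supported by the slave's mode flags. This non-DM path
 * additionally rejects DTR operations and opcodes longer than one byte.
 *
 * Return: true if the operation is supported, false otherwise.
 */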
bool spi_mem_supports_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.nbytes &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return true;
}
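
/*
 * Illustrative sketch only: a flash driver would typically combine the three
 * entry points above, e.g. probing for a 1-1-4 quad read and falling back to
 * single-bit I/O. The opcodes, dummy length and variable names below are
 * assumptions for illustration, not part of this file:
 *
 *	struct spi_mem_op read_op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, offset, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(slave, &read_op))
 *		read_op = (struct spi_mem_op)
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				   SPI_MEM_OP_ADDR(3, offset, 1),
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, buf, 1));
 *
 *	ret = spi_mem_adjust_op_size(slave, &read_op);
 *	if (!ret)
 *		ret = spi_mem_exec_op(slave, &read_op);
 */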