// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bug.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
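
/*
 * Usage sketch (illustrative, kept out of the build): how a Linux SPI
 * controller driver's ->exec_op() hook might wrap a DMA transfer with the
 * two helpers above. foo_exec_op() and foo_start_dma() are hypothetical
 * placeholders, not functions provided by this file.
 */
#if 0
static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct sg_table sgt;
	int ret;

	/* Map the op's data buffer before handing it to the DMA engine. */
	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
	if (ret)
		return ret;

	ret = foo_start_dma(ctlr, op, &sgt);	/* hypothetical helper */

	/* Unmap once the DMA operation has completed, even on failure. */
	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);

	return ret;
}
#endif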
#endif /* __UBOOT__ */

static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_slave *slave,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_dtr_supports_op(struct spi_slave *slave,
			     const struct spi_mem_op *op)
{
	if (op->cmd.buswidth == 8 && op->cmd.nbytes % 2)
		return false;

	if (op->addr.nbytes && op->addr.buswidth == 8 && op->addr.nbytes % 2)
		return false;

	if (op->dummy.nbytes && op->dummy.buswidth == 8 && op->dummy.nbytes % 2)
		return false;

	/*
	 * Transactions of odd length do not make sense for 8D-8D-8D mode
	 * because a byte is transferred in just half a cycle.
	 */
	if (op->data.dir != SPI_MEM_NO_DATA && op->data.dir != SPI_MEM_DATA_IN &&
	    op->data.buswidth == 8 && op->data.nbytes % 2)
		return false;

	return spi_mem_check_buswidth(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);
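
/*
 * Usage sketch (illustrative, kept out of the build): building an
 * 8D-8D-8D read op and checking it against the DTR rules above. The
 * opcode, dummy length, @slave, @buf and @len are placeholders chosen
 * for the example, not values mandated by this file.
 */
#if 0
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0xee, 8),
			   SPI_MEM_OP_ADDR(4, 0, 8),
			   SPI_MEM_OP_DUMMY(16, 8),
			   SPI_MEM_OP_DATA_IN(len, buf, 8));

	op.cmd.nbytes = 2;	/* DTR opcodes are sent as two bytes */
	op.cmd.dtr = true;
	op.addr.dtr = true;
	op.dummy.dtr = true;
	op.data.dtr = true;

	if (!spi_mem_dtr_supports_op(slave, &op))
		/* fall back to a single data rate protocol */;
#endif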

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return spi_mem_check_buswidth(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support Quad IOs but the hardware prevents you from using it because only 2
 * IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
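
/*
 * Usage sketch (illustrative, kept out of the build): probing whether a
 * 1-1-4 quad-output fast read can be used before committing to it.
 * @slave, @buf and @len are assumed to come from the caller.
 */
#if 0
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, 0, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	if (!spi_mem_supports_op(slave, &op))
		/* downgrade to a slower read (e.g. opcode 0x03) */;
#endif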

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		return ret;

	if (ops->mem_ops && ops->mem_ops->exec_op) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);

#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP) {
			spi_release_bus(slave);
			return ret;
		}
	}

#ifndef __UBOOT__
	/*
	 * These locals are only used by this Linux-only branch; they mirror
	 * the declarations in the Linux implementation of this function.
	 */
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Avoid using malloc() here so that we can use this code in SPL where
	 * simple malloc may be used. That implementation does not allow free()
	 * so repeated calls to this code can exhaust the space.
	 *
	 * The value of op_len is small, since it does not include the actual
	 * data being sent, only the op-code and address. In fact, it should be
	 * possible to just use a small fixed value here instead of op_len.
	 */
	u8 op_buf[op_len];

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
					  (8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret)
		return ret;

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf) {
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, SPI_XFER_END);
		if (ret)
			return ret;
	}

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
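
/*
 * Usage sketch (illustrative, kept out of the build): reading the 3-byte
 * JEDEC ID of a flash behind @slave. One spi_mem_op describes the whole
 * instruction cycle; spi_mem_exec_op() then runs it through the
 * controller's ->exec_op() or the generic spi_xfer() fallback above.
 */
#if 0
	u8 id[3];
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
	int ret = spi_mem_exec_op(slave, &op);
#endif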

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones; others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN) {
			if (slave->max_read_size)
				op->data.nbytes = min(op->data.nbytes,
						      slave->max_read_size);
		} else if (slave->max_write_size) {
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);
		}

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
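
/*
 * Usage sketch (illustrative, kept out of the build): splitting a large
 * read into chunks the controller can handle. Each pass lets
 * spi_mem_adjust_op_size() shrink op.data.nbytes, then advances by the
 * amount actually transferred. @slave, @op, @buf, @start and @len are
 * assumed to be set up by the caller.
 */
#if 0
	size_t done = 0;
	int ret;

	while (done < len) {
		op.addr.val = start + done;
		op.data.buf.in = buf + done;
		op.data.nbytes = len - done;

		ret = spi_mem_adjust_op_size(slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(slave, &op);
		if (ret)
			return ret;

		done += op.data.nbytes;
	}
#endif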

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->slave, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->slave, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->slave, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->slave, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @slave: the SPI device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_slave *slave,
		      const struct spi_mem_dirmap_info *info)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	struct spi_mem_dirmap_desc *desc;
	int ret = -EOPNOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->slave = slave;
	desc->info = *info;
	if (ops->mem_ops && ops->mem_ops->dirmap_create)
		ret = ops->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->slave, &desc->info.op_tmpl))
			ret = -EOPNOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
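
/*
 * Usage sketch (illustrative, kept out of the build): creating a 16MiB
 * read window over a NOR-style flash. addr.val and data.nbytes in the
 * template are filled in per access by the dirmap code; SZ_16M comes
 * from <linux/sizes.h> and is only an example window size.
 */
#if 0
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,
	};
	struct spi_mem_dirmap_desc *desc = spi_mem_dirmap_create(slave, &info);

	if (IS_ERR(desc))
		return PTR_ERR(desc);
#endif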

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (!desc->nodirmap && ops->mem_ops && ops->mem_ops->dirmap_destroy)
		ops->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

#ifndef __UBOOT__
static void devm_spi_mem_dirmap_release(struct udevice *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @slave: the SPI device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct udevice *dev, struct spi_slave *slave,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(slave, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct udevice *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct udevice *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
#endif /* __UBOOT__ */

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap)
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	else if (ops->mem_ops && ops->mem_ops->dirmap_read)
		ret = ops->mem_ops->dirmap_read(desc, offs, len, buf);
	else
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
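
/*
 * Usage sketch (illustrative, kept out of the build): draining a full
 * @len bytes through a dirmap descriptor, honouring the short-read
 * semantics documented above. @desc, @offs, @len and @buf are assumed
 * to come from the caller.
 */
#if 0
	size_t done = 0;

	while (done < len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs + done,
						  len - done,
						  (u8 *)buf + done);
		if (ret < 0)
			return ret;

		done += ret;
	}
#endif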

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct udevice *bus = desc->slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap)
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	else if (ops->mem_ops && ops->mem_ops->dirmap_write)
		ret = ops->mem_ops->dirmap_write(desc, offs, len, buf);
	else
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */