// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <[email protected]>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
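
/*
 * Illustrative sketch: how a controller driver's ->exec_op() path might wrap
 * a hardware DMA transfer with the two helpers above. example_exec_op_dma()
 * and the elided hardware-programming step are hypothetical, not part of the
 * spi-mem API.
 */
static int __maybe_unused
example_exec_op_dma(struct spi_controller *ctlr, const struct spi_mem_op *op)
{
	struct sg_table sgt;
	int ret;

	/* Map op->data.buf.{in,out} for the controller's DMA device. */
	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
	if (ret)
		return ret;

	/* ... program the controller and run the DMA using &sgt here ... */

	/* Only valid because the map call above returned 0. */
	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);

	return 0;
}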

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	if (op->max_freq && mem->spi->controller->min_speed_hz &&
	    op->max_freq < mem->spi->controller->min_speed_hz)
		return false;

	if (op->max_freq &&
	    op->max_freq < mem->spi->max_speed_hz) {
		if (!spi_mem_controller_is_capable(ctlr, per_op_freq))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
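
/*
 * Illustrative sketch: a controller driver can layer its own restrictions on
 * top of the generic checks by calling spi_mem_default_supports_op() from
 * its ->supports_op() hook. The 4-byte address limit below is an invented
 * constraint and the helper name is hypothetical.
 */
static bool __maybe_unused
example_ctlr_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* Imaginary controller limitation: at most 4 address bytes. */
	if (op->addr.nbytes > 4)
		return false;

	return spi_mem_default_supports_op(mem, op);
}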

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes, and it can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only 2
 * I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
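
/*
 * Illustrative sketch: memory drivers typically probe several operation
 * variants and keep the fastest one the hardware accepts. Both operation
 * templates are assumed to be provided by the caller; the helper name is
 * hypothetical.
 */
static const struct spi_mem_op * __maybe_unused
example_select_read_op(struct spi_mem *mem,
		       const struct spi_mem_op *quad,
		       const struct spi_mem_op *single)
{
	return spi_mem_supports_op(mem, quad) ? quad : single;
}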

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

static void spi_mem_add_op_stats(struct spi_statistics __percpu *pcpu_stats,
				 const struct spi_mem_op *op, int exec_op_ret)
{
	struct spi_statistics *stats;
	u64 len, l2len;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	/*
	 * We do not have the concept of messages or transfers. Let's consider
	 * that one operation is equivalent to one message and one transfer.
	 */
	u64_stats_inc(&stats->messages);
	u64_stats_inc(&stats->transfers);

	/* Use the sum of all lengths as bytes count and histogram value. */
	len = op->cmd.nbytes + op->addr.nbytes;
	len += op->dummy.nbytes + op->data.nbytes;
	u64_stats_add(&stats->bytes, len);
	l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	/* Only account for data bytes as transferred bytes. */
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		u64_stats_add(&stats->bytes_tx, op->data.nbytes);
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		u64_stats_add(&stats->bytes_rx, op->data.nbytes);

	/*
	 * A timeout is not an error, following the same behavior as
	 * spi_transfer_one_message().
	 */
	if (exec_op_ret == -ETIMEDOUT)
		u64_stats_inc(&stats->timedout);
	else if (exec_op_ret)
		u64_stats_inc(&stats->errors);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	/* Make sure the operation frequency is correct before going further */
	spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -EOPNOTSUPP;

	if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
			spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
			spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);

			return ret;
		}
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	xfers[xferpos].speed_hz = op->max_freq;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		xfers[xferpos].speed_hz = op->max_freq;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
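
/*
 * Illustrative sketch: building an operation with the SPI_MEM_OP() helpers
 * from <linux/spi/spi-mem.h> and executing it. 0x9f is the common JEDEC
 * READ ID opcode; the helper name is hypothetical.
 */
static int __maybe_unused example_read_jedec_id(struct spi_mem *mem, u8 *id)
{
	/* 1-byte command, no address, no dummy cycles, 3 data bytes in. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	/* @id must be DMA-able (e.g. kmalloc'ed), per spi_mem_check_op(). */
	return spi_mem_exec_op(mem, &op);
}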

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
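
/*
 * Illustrative sketch of the canonical caller pattern: because
 * spi_mem_adjust_op_size() may shrink op->data.nbytes, large accesses are
 * issued in a loop that advances by however much each pass transferred.
 * The 0x03 (READ) opcode and the helper name are illustrative.
 */
static int __maybe_unused
example_chunked_read(struct spi_mem *mem, u32 addr, u8 *buf, size_t len)
{
	while (len) {
		struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
						  SPI_MEM_OP_ADDR(3, addr, 1),
						  SPI_MEM_OP_NO_DUMMY,
						  SPI_MEM_OP_DATA_IN(len, buf, 1));
		int ret;

		ret = spi_mem_adjust_op_size(mem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(mem, &op);
		if (ret)
			return ret;

		/* Advance by the (possibly shrunk) amount actually done. */
		addr += op.data.nbytes;
		buf += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}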

/**
 * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
 *			      match controller, PCB and chip limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some chips have per-op frequency limitations and must adapt the maximum
 * speed. This function allows SPI mem drivers to set @op->max_freq to the
 * maximum supported value.
 */
void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz)
		op->max_freq = mem->spi->max_speed_hz;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);

/**
 * spi_mem_calc_op_duration() - Derive the theoretical length (in ns) of an
 *				operation. This helps find the best variant
 *				among a list of possible choices.
 * @op: the operation to benchmark
 *
 * Some chips have per-op frequency limitations, PCBs usually have their own
 * limitations as well, and controllers can support dual, quad or even octal
 * modes, sometimes in DTR. All these combinations make it impossible to
 * statically list the best combination for all situations. If we want
 * something accurate, all these combinations should be rated (e.g. with a
 * time estimate) and the best pick should be taken based on these
 * calculations.
 *
 * Return: a ns estimate for the time this op would take.
 */
u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
{
	u64 ncycles = 0;
	u32 ns_per_cycles;

	ns_per_cycles = 1000000000 / op->max_freq;
	ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
	ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
	ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
	ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);

	return ncycles * ns_per_cycles;
}
EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration);
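
/*
 * Illustrative sketch: ranking two candidate operations by their theoretical
 * duration, as the kernel-doc above suggests. The helper name is
 * hypothetical.
 */
static struct spi_mem_op * __maybe_unused
example_pick_faster_op(struct spi_mem_op *a, struct spi_mem_op *b)
{
	return spi_mem_calc_op_duration(a) <= spi_mem_calc_op_duration(b) ?
	       a : b;
}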

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -EOPNOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
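
/*
 * Illustrative sketch: creating a read direct mapping over the first 16 MiB
 * of a device. The FAST READ (0x0b) template and mapping size are
 * illustrative; data nbytes/buf in the template are left empty because they
 * are filled per access. The helper name is hypothetical.
 */
static struct spi_mem_dirmap_desc * __maybe_unused
example_create_read_dirmap(struct spi_mem *mem)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_DUMMY(1, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = 16 * 1024 * 1024,
	};

	return spi_mem_dirmap_create(mem, &info);
}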

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
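
/*
 * Illustrative sketch: since spi_mem_dirmap_read() may return less than
 * @len, callers loop until the whole range is covered. The helper name is
 * hypothetical.
 */
static ssize_t __maybe_unused
example_dirmap_read_all(struct spi_mem_dirmap_desc *desc, u64 offs,
			void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs + done,
						  len - done,
						  (u8 *)buf + done);

		if (ret < 0)
			return ret;
		if (!ret)
			return -EIO; /* guard against an endless loop */

		done += ret;
	}

	return done;
}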

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *	   -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
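
/*
 * Illustrative sketch: waiting for a NOR-style WIP (write in progress) bit
 * to clear. 0x05 is the common READ STATUS opcode; the delays and the helper
 * name are illustrative. @status must point to a DMA-able byte.
 */
static int __maybe_unused example_wait_ready(struct spi_mem *mem, u8 *status)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(1, status, 1));

	/*
	 * Poll until (status & BIT(0)) == 0: 10us head start, 20us polling
	 * period, 500ms timeout.
	 */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 10, 20, 500);
}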

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
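
/*
 * Illustrative sketch of the shape of a minimal SPI memory driver. All
 * "example_" identifiers are hypothetical; a real driver module would finish
 * with module_spi_mem_driver(example_flash_driver), which wraps the
 * register/unregister helpers in this file.
 */
static int example_flash_probe(struct spi_mem *mem)
{
	/* Detect the chip, register an MTD device, etc. */
	return 0;
}

static __maybe_unused struct spi_mem_driver example_flash_driver = {
	.spidrv = {
		.driver = {
			.name = "example-flash",
		},
	},
	.probe = example_flash_probe,
};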

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
