// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_controller_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)
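/*
 * For example, SPI_STATISTICS_SHOW(bytes_tx) below expands into the show
 * functions and attributes backing a "bytes_tx" file in the "statistics"
 * sysfs group of both the controller and each child device (illustrative
 * paths: .../spi_master/spi0/statistics/bytes_tx and
 * .../spi0.0/statistics/bytes_tx).
 */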

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_controller_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (is_of_node(fwnode))
		spi->irq = of_irq_get(dev->of_node, 0);
	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
	if (spi->irq == -EPROBE_DEFER)
		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
	if (spi->irq < 0)
		spi->irq = 0;

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific code like
 * arch/.../mach.../board-YYY.c, along with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * the idr of controller bus numbers (spi_controller_idr).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}
/*
 * Zero(0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * were initialized to 0, it would be difficult to differentiate
 * between a valid physical CS 0 and an unused logical CS whose physical
 * CS can be 0. As a solution to this issue, initialize all the CS to -1.
 * All the unused logical CS will then have a physical CS value of -1 and
 * can be ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS doesn't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	if (!spi_controller_is_target(ctlr)) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
			if (status)
				return status;
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
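
/*
 * Example usage (an illustrative sketch, not code from this file; "mychip"
 * and its parameters are made up). Note that full-featured callers, such as
 * spi_new_device() below, additionally mark unused chip selects invalid and
 * initialize cs_index_mask:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "mychip", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 2);	// physical CS 2
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without adding
 *		return -ENODEV;
 *	}
 */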

static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
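
/*
 * Example (illustrative, with made-up values): an adapter driver holding a
 * controller it created could instantiate one device like this:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "spidev",
 *		.max_speed_hz	= 4000000,
 *		.chip_select	= 0,
 *		.mode		= SPI_MODE_0,
 *	};
 *
 *	struct spi_device *dev = spi_new_device(ctlr, &chip);
 *
 *	if (!dev)
 *		return -ENODEV;	// NULL-or-pointer convention, see above
 */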

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	struct fwnode_handle *fwnode;

	if (!spi)
		return;

	fwnode = dev_fwnode(&spi->dev);
	if (is_of_node(fwnode)) {
		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
		of_node_put(to_of_node(fwnode));
	} else if (is_acpi_device_node(fwnode)) {
		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
	}
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
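
/*
 * Example (illustrative; device names and numbers are made up): board init
 * code typically registers a table of hard-wired devices once, e.g. from an
 * arch_initcall:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */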

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}
static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of expressing the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
	 * into account.
	 */
	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (enable == spi_is_last_cs(spi)) &&
	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;

	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
	if (spi->controller->last_cs_mode_high)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI controllers need both GPIO CS & ->set_cs() */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the MSEC_PER_SEC multiplier before the division;
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
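
		/*
		 * For example (illustrative numbers): a 512-byte transfer at
		 * 1 MHz gives ms = 8 * 1000 * 512 / 1000000 = 4, which the
		 * tolerance below turns into a 208 ms timeout.
		 */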

		/*
		 * Double it and add a 200 ms tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		fsleep(us);
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
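
/*
 * Example (illustrative): a caller might describe a delay of one SCK cycle
 * and let the core convert it against the transfer's clock rate:
 *
 *	struct spi_delay setup = {
 *		.value	= 1,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *
 *	// with xfer->effective_speed_hz == 10000000, spi_delay_to_ns()
 *	// returns 1 * DIV_ROUND_UP(1000000000, 10000000) == 100 ns
 *	spi_delay_exec(&setup, xfer);
 */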

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
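
/*
 * Illustrative sketch (not a real driver): a controller driver that relies
 * on the default spi_transfer_one_message() above only needs to provide a
 * transfer_one() operation; foo_start_dma() is a made-up hardware helper:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(ctlr, xfer);
 *		return 1;	// positive: transfer still in flight
 *	}
 *
 * Returning a positive value makes the core wait in spi_transfer_wait()
 * until the driver calls spi_finalize_current_transfer() below.
 */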

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
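
/*
 * Example (illustrative): a driver following the transfer_one() sketch
 * above would typically call this from its completion interrupt handler;
 * foo_clear_irq() is a made-up helper:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_clear_irq(ctlr);
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */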

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

1799 /*
1800 	 * A driver's implementation of transfer_one_message() must arrange for
1801 * spi_finalize_current_message() to get called. Most drivers will do
1802 * this in the calling context, but some don't. For those cases, a
1803 * completion is used to guarantee that this function does not return
1804 * until spi_finalize_current_message() is done accessing
1805 * ctlr->cur_msg.
1806 	 * The following two flags allow the core to opportunistically skip the
1807 	 * completion, since its use involves expensive spin locks.
1808 * In case of a race with the context that calls
1809 * spi_finalize_current_message() the completion will always be used,
1810 * due to strict ordering of these flags using barriers.
1811 */
1812 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1813 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1814 reinit_completion(&ctlr->cur_msg_completion);
1815 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1816
1817 ret = ctlr->transfer_one_message(ctlr, msg);
1818 if (ret) {
1819 dev_err(&ctlr->dev,
1820 "failed to transfer one message from queue\n");
1821 return ret;
1822 }
1823
1824 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1825 smp_mb(); /* See spi_finalize_current_message()... */
1826 if (READ_ONCE(ctlr->cur_msg_incomplete))
1827 wait_for_completion(&ctlr->cur_msg_completion);
1828
1829 return 0;
1830 }
1831
1832 /**
1833 * __spi_pump_messages - function which processes SPI message queue
1834 * @ctlr: controller to process queue for
1835 * @in_kthread: true if we are in the context of the message pump thread
1836 *
1837 * This function checks if there is any SPI message in the queue that
1838  * needs processing and, if so, calls out to the driver to initialize hardware
1839 * and transfer each message.
1840 *
1841  * Note that it is called both from the kthread itself and from
1842 * inside spi_sync(); the queue extraction handling at the top of the
1843 * function should deal with this safely.
1844 */
1845 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1846 {
1847 struct spi_message *msg;
1848 bool was_busy = false;
1849 unsigned long flags;
1850 int ret;
1851
1852 /* Take the I/O mutex */
1853 mutex_lock(&ctlr->io_mutex);
1854
1855 /* Lock queue */
1856 spin_lock_irqsave(&ctlr->queue_lock, flags);
1857
1858 /* Make sure we are not already running a message */
1859 if (ctlr->cur_msg)
1860 goto out_unlock;
1861
1862 /* Check if the queue is idle */
1863 if (list_empty(&ctlr->queue) || !ctlr->running) {
1864 if (!ctlr->busy)
1865 goto out_unlock;
1866
1867 /* Defer any non-atomic teardown to the thread */
1868 if (!in_kthread) {
1869 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1870 !ctlr->unprepare_transfer_hardware) {
1871 spi_idle_runtime_pm(ctlr);
1872 ctlr->busy = false;
1873 ctlr->queue_empty = true;
1874 trace_spi_controller_idle(ctlr);
1875 } else {
1876 kthread_queue_work(ctlr->kworker,
1877 &ctlr->pump_messages);
1878 }
1879 goto out_unlock;
1880 }
1881
1882 ctlr->busy = false;
1883 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1884
1885 kfree(ctlr->dummy_rx);
1886 ctlr->dummy_rx = NULL;
1887 kfree(ctlr->dummy_tx);
1888 ctlr->dummy_tx = NULL;
1889 if (ctlr->unprepare_transfer_hardware &&
1890 ctlr->unprepare_transfer_hardware(ctlr))
1891 dev_err(&ctlr->dev,
1892 "failed to unprepare transfer hardware\n");
1893 spi_idle_runtime_pm(ctlr);
1894 trace_spi_controller_idle(ctlr);
1895
1896 spin_lock_irqsave(&ctlr->queue_lock, flags);
1897 ctlr->queue_empty = true;
1898 goto out_unlock;
1899 }
1900
1901 /* Extract head of queue */
1902 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1903 ctlr->cur_msg = msg;
1904
1905 list_del_init(&msg->queue);
1906 if (ctlr->busy)
1907 was_busy = true;
1908 else
1909 ctlr->busy = true;
1910 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1911
1912 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1913 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1914
1915 ctlr->cur_msg = NULL;
1916 ctlr->fallback = false;
1917
1918 mutex_unlock(&ctlr->io_mutex);
1919
1920 /* Prod the scheduler in case transfer_one() was busy waiting */
1921 if (!ret)
1922 cond_resched();
1923 return;
1924
1925 out_unlock:
1926 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1927 mutex_unlock(&ctlr->io_mutex);
1928 }
1929
1930 /**
1931 * spi_pump_messages - kthread work function which processes spi message queue
1932 * @work: pointer to kthread work struct contained in the controller struct
1933 */
1934 static void spi_pump_messages(struct kthread_work *work)
1935 {
1936 struct spi_controller *ctlr =
1937 container_of(work, struct spi_controller, pump_messages);
1938
1939 __spi_pump_messages(ctlr, true);
1940 }
1941
1942 /**
1943 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1944 * @ctlr: Pointer to the spi_controller structure of the driver
1945 * @xfer: Pointer to the transfer being timestamped
1946 * @progress: How many words (not bytes) have been transferred so far
1947 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1948 * transfer, for less jitter in time measurement. Only compatible
1949 * with PIO drivers. If true, must follow up with
1950  *	      spi_take_timestamp_post, otherwise the system will crash.
1951 * WARNING: for fully predictable results, the CPU frequency must
1952 * also be under control (governor).
1953 *
1954 * This is a helper for drivers to collect the beginning of the TX timestamp
1955 * for the requested byte from the SPI transfer. The frequency with which this
1956 * function must be called (once per word, once for the whole transfer, once
1957  * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1958 * greater than or equal to the requested byte at the time of the call. The
1959 * timestamp is only taken once, at the first such call. It is assumed that
1960 * the driver advances its @tx buffer pointer monotonically.
1961 */
1962 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1963 struct spi_transfer *xfer,
1964 size_t progress, bool irqs_off)
1965 {
1966 if (!xfer->ptp_sts)
1967 return;
1968
1969 if (xfer->timestamped)
1970 return;
1971
1972 if (progress > xfer->ptp_sts_word_pre)
1973 return;
1974
1975 /* Capture the resolution of the timestamp */
1976 xfer->ptp_sts_word_pre = progress;
1977
1978 if (irqs_off) {
1979 local_irq_save(ctlr->irq_flags);
1980 preempt_disable();
1981 }
1982
1983 ptp_read_system_prets(xfer->ptp_sts);
1984 }
1985 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1986
1987 /**
1988 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1989 * @ctlr: Pointer to the spi_controller structure of the driver
1990 * @xfer: Pointer to the transfer being timestamped
1991 * @progress: How many words (not bytes) have been transferred so far
1992 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1993 *
1994 * This is a helper for drivers to collect the end of the TX timestamp for
1995 * the requested byte from the SPI transfer. Can be called with an arbitrary
1996 * frequency: only the first call where @tx exceeds or is equal to the
1997 * requested word will be timestamped.
1998 */
1999 void spi_take_timestamp_post(struct spi_controller *ctlr,
2000 struct spi_transfer *xfer,
2001 size_t progress, bool irqs_off)
2002 {
2003 if (!xfer->ptp_sts)
2004 return;
2005
2006 if (xfer->timestamped)
2007 return;
2008
2009 if (progress < xfer->ptp_sts_word_post)
2010 return;
2011
2012 ptp_read_system_postts(xfer->ptp_sts);
2013
2014 if (irqs_off) {
2015 local_irq_restore(ctlr->irq_flags);
2016 preempt_enable();
2017 }
2018
2019 /* Capture the resolution of the timestamp */
2020 xfer->ptp_sts_word_post = progress;
2021
2022 xfer->timestamped = 1;
2023 }
2024 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
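
/*
 * Usage sketch (editorial illustration; foo_write_word() and nwords are
 * hypothetical): a PIO driver brackets each word it transmits with the two
 * helpers, passing the number of words already transferred, so the helpers
 * record the system-time window around the word selected by
 * @ptp_sts_word_pre / @ptp_sts_word_post:
 *
 *	for (i = 0; i < nwords; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(fs, xfer, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */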
2025
2026 /**
2027 * spi_set_thread_rt - set the controller to pump at realtime priority
2028 * @ctlr: controller to boost priority of
2029 *
2030 * This can be called because the controller requested realtime priority
2031 * (by setting the ->rt value before calling spi_register_controller()) or
2032 * because a device on the bus said that its transfers needed realtime
2033 * priority.
2034 *
2035 * NOTE: at the moment if any device on a bus says it needs realtime then
2036 * the thread will be at realtime priority for all transfers on that
2037 * controller. If this eventually becomes a problem we may see if we can
2038 * find a way to boost the priority only temporarily during relevant
2039 * transfers.
2040 */
2041 static void spi_set_thread_rt(struct spi_controller *ctlr)
2042 {
2043 dev_info(&ctlr->dev,
2044 "will run message pump with realtime priority\n");
2045 sched_set_fifo(ctlr->kworker->task);
2046 }
2047
2048 static int spi_init_queue(struct spi_controller *ctlr)
2049 {
2050 ctlr->running = false;
2051 ctlr->busy = false;
2052 ctlr->queue_empty = true;
2053
2054 ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2055 if (IS_ERR(ctlr->kworker)) {
2056 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2057 return PTR_ERR(ctlr->kworker);
2058 }
2059
2060 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2061
2062 /*
2063 * Controller config will indicate if this controller should run the
2064 * message pump with high (realtime) priority to reduce the transfer
2065 * latency on the bus by minimising the delay between a transfer
2066 * request and the scheduling of the message pump thread. Without this
2067 * setting the message pump thread will remain at default priority.
2068 */
2069 if (ctlr->rt)
2070 spi_set_thread_rt(ctlr);
2071
2072 return 0;
2073 }
2074
2075 /**
2076 * spi_get_next_queued_message() - called by driver to check for queued
2077 * messages
2078 * @ctlr: the controller to check for queued messages
2079 *
2080 * If there are more messages in the queue, the next message is returned from
2081 * this call.
2082 *
2083 * Return: the next message in the queue, else NULL if the queue is empty.
2084 */
2085 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2086 {
2087 struct spi_message *next;
2088 unsigned long flags;
2089
2090 /* Get a pointer to the next message, if any */
2091 spin_lock_irqsave(&ctlr->queue_lock, flags);
2092 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2093 queue);
2094 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2095
2096 return next;
2097 }
2098 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
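
/*
 * Usage sketch (editorial illustration; foo_power_down() is hypothetical):
 * a driver implementing its own message pump can peek at the queue to
 * decide whether to keep the hardware powered between messages:
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_power_down(spi_controller_get_devdata(ctlr));
 */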
2099
2100 /*
2101 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2102 * and spi_maybe_unoptimize_message()
2103 * @msg: the message to unoptimize
2104 *
2105 * Peripheral drivers should use spi_unoptimize_message() and callers inside
2106  * the core should use spi_maybe_unoptimize_message() rather than calling this
2107 * function directly.
2108 *
2109 * It is not valid to call this on a message that is not currently optimized.
2110 */
2111 static void __spi_unoptimize_message(struct spi_message *msg)
2112 {
2113 struct spi_controller *ctlr = msg->spi->controller;
2114
2115 if (ctlr->unoptimize_message)
2116 ctlr->unoptimize_message(msg);
2117
2118 spi_res_release(ctlr, msg);
2119
2120 msg->optimized = false;
2121 msg->opt_state = NULL;
2122 }
2123
2124 /*
2125 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2126 * @msg: the message to unoptimize
2127 *
2128 * This function is used to unoptimize a message if and only if it was
2129 * optimized by the core (via spi_maybe_optimize_message()).
2130 */
2131 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2132 {
2133 if (!msg->pre_optimized && msg->optimized &&
2134 !msg->spi->controller->defer_optimize_message)
2135 __spi_unoptimize_message(msg);
2136 }
2137
2138 /**
2139 * spi_finalize_current_message() - the current message is complete
2140 * @ctlr: the controller to return the message to
2141 *
2142  * Called by the driver to notify the core that the message at the front of the
2143 * queue is complete and can be removed from the queue.
2144 */
2145 void spi_finalize_current_message(struct spi_controller *ctlr)
2146 {
2147 struct spi_transfer *xfer;
2148 struct spi_message *mesg;
2149 int ret;
2150
2151 mesg = ctlr->cur_msg;
2152
2153 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2154 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2155 ptp_read_system_postts(xfer->ptp_sts);
2156 xfer->ptp_sts_word_post = xfer->len;
2157 }
2158 }
2159
2160 if (unlikely(ctlr->ptp_sts_supported))
2161 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2162 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2163
2164 spi_unmap_msg(ctlr, mesg);
2165
2166 if (mesg->prepared && ctlr->unprepare_message) {
2167 ret = ctlr->unprepare_message(ctlr, mesg);
2168 if (ret) {
2169 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2170 ret);
2171 }
2172 }
2173
2174 mesg->prepared = false;
2175
2176 spi_maybe_unoptimize_message(mesg);
2177
2178 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2179 smp_mb(); /* See __spi_pump_transfer_message()... */
2180 if (READ_ONCE(ctlr->cur_msg_need_completion))
2181 complete(&ctlr->cur_msg_completion);
2182
2183 trace_spi_message_done(mesg);
2184
2185 mesg->state = NULL;
2186 if (mesg->complete)
2187 mesg->complete(mesg->context);
2188 }
2189 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2190
2191 static int spi_start_queue(struct spi_controller *ctlr)
2192 {
2193 unsigned long flags;
2194
2195 spin_lock_irqsave(&ctlr->queue_lock, flags);
2196
2197 if (ctlr->running || ctlr->busy) {
2198 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2199 return -EBUSY;
2200 }
2201
2202 ctlr->running = true;
2203 ctlr->cur_msg = NULL;
2204 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2205
2206 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2207
2208 return 0;
2209 }
2210
2211 static int spi_stop_queue(struct spi_controller *ctlr)
2212 {
2213 unsigned int limit = 500;
2214 unsigned long flags;
2215
2216 /*
2217 * This is a bit lame, but is optimized for the common execution path.
2218 * A wait_queue on the ctlr->busy could be used, but then the common
2219 * execution path (pump_messages) would be required to call wake_up or
2220 * friends on every SPI message. Do this instead.
2221 */
2222 do {
2223 spin_lock_irqsave(&ctlr->queue_lock, flags);
2224 if (list_empty(&ctlr->queue) && !ctlr->busy) {
2225 ctlr->running = false;
2226 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2227 return 0;
2228 }
2229 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2230 usleep_range(10000, 11000);
2231 } while (--limit);
2232
2233 return -EBUSY;
2234 }
2235
2236 static int spi_destroy_queue(struct spi_controller *ctlr)
2237 {
2238 int ret;
2239
2240 ret = spi_stop_queue(ctlr);
2241
2242 /*
2243 * kthread_flush_worker will block until all work is done.
2244 * If the reason that stop_queue timed out is that the work will never
2245 	 * finish, then it does no good to flush or stop the thread, so
2246 	 * just return the error.
2247 */
2248 if (ret) {
2249 dev_err(&ctlr->dev, "problem destroying queue\n");
2250 return ret;
2251 }
2252
2253 kthread_destroy_worker(ctlr->kworker);
2254
2255 return 0;
2256 }
2257
2258 static int __spi_queued_transfer(struct spi_device *spi,
2259 struct spi_message *msg,
2260 bool need_pump)
2261 {
2262 struct spi_controller *ctlr = spi->controller;
2263 unsigned long flags;
2264
2265 spin_lock_irqsave(&ctlr->queue_lock, flags);
2266
2267 if (!ctlr->running) {
2268 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2269 return -ESHUTDOWN;
2270 }
2271 msg->actual_length = 0;
2272 msg->status = -EINPROGRESS;
2273
2274 list_add_tail(&msg->queue, &ctlr->queue);
2275 ctlr->queue_empty = false;
2276 if (!ctlr->busy && need_pump)
2277 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2278
2279 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2280 return 0;
2281 }
2282
2283 /**
2284 * spi_queued_transfer - transfer function for queued transfers
2285 * @spi: SPI device which is requesting transfer
2286  * @msg: SPI message to be queued to the driver's queue
2287 *
2288 * Return: zero on success, else a negative error code.
2289 */
2290 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2291 {
2292 return __spi_queued_transfer(spi, msg, true);
2293 }
2294
2295 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2296 {
2297 int ret;
2298
2299 ctlr->transfer = spi_queued_transfer;
2300 if (!ctlr->transfer_one_message)
2301 ctlr->transfer_one_message = spi_transfer_one_message;
2302
2303 /* Initialize and start queue */
2304 ret = spi_init_queue(ctlr);
2305 if (ret) {
2306 dev_err(&ctlr->dev, "problem initializing queue\n");
2307 goto err_init_queue;
2308 }
2309 ctlr->queued = true;
2310 ret = spi_start_queue(ctlr);
2311 if (ret) {
2312 dev_err(&ctlr->dev, "problem starting queue\n");
2313 goto err_start_queue;
2314 }
2315
2316 return 0;
2317
2318 err_start_queue:
2319 spi_destroy_queue(ctlr);
2320 err_init_queue:
2321 return ret;
2322 }
2323
2324 /**
2325  * spi_flush_queue - Send all pending messages in the queue from the caller's
2326 * context
2327 * @ctlr: controller to process queue for
2328 *
2329 * This should be used when one wants to ensure all pending messages have been
2330  * sent before doing something. It is used by the spi-mem code to make sure SPI
2331 * memory operations do not preempt regular SPI transfers that have been queued
2332 * before the spi-mem operation.
2333 */
2334 void spi_flush_queue(struct spi_controller *ctlr)
2335 {
2336 if (ctlr->transfer == spi_queued_transfer)
2337 __spi_pump_messages(ctlr, false);
2338 }
2339
2340 /*-------------------------------------------------------------------------*/
2341
2342 #if defined(CONFIG_OF)
2343 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2344 struct spi_delay *delay, const char *prop)
2345 {
2346 u32 value;
2347
2348 if (!of_property_read_u32(nc, prop, &value)) {
2349 if (value > U16_MAX) {
2350 delay->value = DIV_ROUND_UP(value, 1000);
2351 delay->unit = SPI_DELAY_UNIT_USECS;
2352 } else {
2353 delay->value = value;
2354 delay->unit = SPI_DELAY_UNIT_NSECS;
2355 }
2356 }
2357 }
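
/*
 * For example (values illustrative): "spi-cs-setup-delay-ns = <50>" is
 * stored as 50 SPI_DELAY_UNIT_NSECS, while a value of <100000> exceeds
 * U16_MAX and is rounded up and stored as 100 SPI_DELAY_UNIT_USECS.
 */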
2358
2359 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2360 struct device_node *nc)
2361 {
2362 u32 value, cs[SPI_CS_CNT_MAX];
2363 int rc, idx;
2364
2365 /* Mode (clock phase/polarity/etc.) */
2366 if (of_property_read_bool(nc, "spi-cpha"))
2367 spi->mode |= SPI_CPHA;
2368 if (of_property_read_bool(nc, "spi-cpol"))
2369 spi->mode |= SPI_CPOL;
2370 if (of_property_read_bool(nc, "spi-3wire"))
2371 spi->mode |= SPI_3WIRE;
2372 if (of_property_read_bool(nc, "spi-lsb-first"))
2373 spi->mode |= SPI_LSB_FIRST;
2374 if (of_property_read_bool(nc, "spi-cs-high"))
2375 spi->mode |= SPI_CS_HIGH;
2376
2377 /* Device DUAL/QUAD mode */
2378 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2379 switch (value) {
2380 case 0:
2381 spi->mode |= SPI_NO_TX;
2382 break;
2383 case 1:
2384 break;
2385 case 2:
2386 spi->mode |= SPI_TX_DUAL;
2387 break;
2388 case 4:
2389 spi->mode |= SPI_TX_QUAD;
2390 break;
2391 case 8:
2392 spi->mode |= SPI_TX_OCTAL;
2393 break;
2394 default:
2395 dev_warn(&ctlr->dev,
2396 "spi-tx-bus-width %d not supported\n",
2397 value);
2398 break;
2399 }
2400 }
2401
2402 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2403 switch (value) {
2404 case 0:
2405 spi->mode |= SPI_NO_RX;
2406 break;
2407 case 1:
2408 break;
2409 case 2:
2410 spi->mode |= SPI_RX_DUAL;
2411 break;
2412 case 4:
2413 spi->mode |= SPI_RX_QUAD;
2414 break;
2415 case 8:
2416 spi->mode |= SPI_RX_OCTAL;
2417 break;
2418 default:
2419 dev_warn(&ctlr->dev,
2420 "spi-rx-bus-width %d not supported\n",
2421 value);
2422 break;
2423 }
2424 }
2425
2426 if (spi_controller_is_target(ctlr)) {
2427 if (!of_node_name_eq(nc, "slave")) {
2428 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2429 nc);
2430 return -EINVAL;
2431 }
2432 return 0;
2433 }
2434
2435 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2436 dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2437 return -EINVAL;
2438 }
2439
2440 spi_set_all_cs_unused(spi);
2441
2442 /* Device address */
2443 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2444 SPI_CS_CNT_MAX);
2445 if (rc < 0) {
2446 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2447 nc, rc);
2448 return rc;
2449 }
2450 if (rc > ctlr->num_chipselect) {
2451 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2452 nc, rc);
2453 return rc;
2454 }
2455 if ((of_property_present(nc, "parallel-memories")) &&
2456 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2457 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2458 return -EINVAL;
2459 }
2460 for (idx = 0; idx < rc; idx++)
2461 spi_set_chipselect(spi, idx, cs[idx]);
2462
2463 /*
2464 * By default spi->chip_select[0] will hold the physical CS number,
2465 * so set bit 0 in spi->cs_index_mask.
2466 */
2467 spi->cs_index_mask = BIT(0);
2468
2469 /* Device speed */
2470 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2471 spi->max_speed_hz = value;
2472
2473 /* Device CS delays */
2474 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2475 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2476 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2477
2478 return 0;
2479 }
2480
2481 static struct spi_device *
2482 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2483 {
2484 struct spi_device *spi;
2485 int rc;
2486
2487 /* Alloc an spi_device */
2488 spi = spi_alloc_device(ctlr);
2489 if (!spi) {
2490 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2491 rc = -ENOMEM;
2492 goto err_out;
2493 }
2494
2495 /* Select device driver */
2496 rc = of_alias_from_compatible(nc, spi->modalias,
2497 sizeof(spi->modalias));
2498 if (rc < 0) {
2499 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2500 goto err_out;
2501 }
2502
2503 rc = of_spi_parse_dt(ctlr, spi, nc);
2504 if (rc)
2505 goto err_out;
2506
2507 /* Store a pointer to the node in the device structure */
2508 of_node_get(nc);
2509
2510 device_set_node(&spi->dev, of_fwnode_handle(nc));
2511
2512 /* Register the new device */
2513 rc = spi_add_device(spi);
2514 if (rc) {
2515 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2516 goto err_of_node_put;
2517 }
2518
2519 return spi;
2520
2521 err_of_node_put:
2522 of_node_put(nc);
2523 err_out:
2524 spi_dev_put(spi);
2525 return ERR_PTR(rc);
2526 }
2527
2528 /**
2529 * of_register_spi_devices() - Register child devices onto the SPI bus
2530 * @ctlr: Pointer to spi_controller device
2531 *
2532 * Registers an spi_device for each child node of controller node which
2533 * represents a valid SPI target device.
2534 */
2535 static void of_register_spi_devices(struct spi_controller *ctlr)
2536 {
2537 struct spi_device *spi;
2538 struct device_node *nc;
2539
2540 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2541 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2542 continue;
2543 spi = of_register_spi_device(ctlr, nc);
2544 if (IS_ERR(spi)) {
2545 dev_warn(&ctlr->dev,
2546 "Failed to create SPI device for %pOF\n", nc);
2547 of_node_clear_flag(nc, OF_POPULATED);
2548 }
2549 }
2550 }
2551 #else
2552 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2553 #endif
2554
2555 /**
2556 * spi_new_ancillary_device() - Register ancillary SPI device
2557 * @spi: Pointer to the main SPI device registering the ancillary device
2558 * @chip_select: Chip Select of the ancillary device
2559 *
2560  * Register an ancillary SPI device; for example, some chips have a chip-select
2561 * for normal device usage and another one for setup/firmware upload.
2562 *
2563  * This may only be called from the main SPI device's probe routine.
2564 *
2565 * Return: 0 on success; negative errno on failure
2566 */
2567 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2568 u8 chip_select)
2569 {
2570 struct spi_controller *ctlr = spi->controller;
2571 struct spi_device *ancillary;
2572 int rc;
2573
2574 /* Alloc an spi_device */
2575 ancillary = spi_alloc_device(ctlr);
2576 if (!ancillary) {
2577 rc = -ENOMEM;
2578 goto err_out;
2579 }
2580
2581 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2582
2583 /* Use provided chip-select for ancillary device */
2584 spi_set_all_cs_unused(ancillary);
2585 spi_set_chipselect(ancillary, 0, chip_select);
2586
2587 /* Take over SPI mode/speed from SPI main device */
2588 ancillary->max_speed_hz = spi->max_speed_hz;
2589 ancillary->mode = spi->mode;
2590 /*
2591 * By default spi->chip_select[0] will hold the physical CS number,
2592 * so set bit 0 in spi->cs_index_mask.
2593 */
2594 ancillary->cs_index_mask = BIT(0);
2595
2596 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2597
2598 /* Register the new device */
2599 rc = __spi_add_device(ancillary);
2600 if (rc) {
2601 dev_err(&spi->dev, "failed to register ancillary device\n");
2602 goto err_out;
2603 }
2604
2605 return ancillary;
2606
2607 err_out:
2608 spi_dev_put(ancillary);
2609 return ERR_PTR(rc);
2610 }
2611 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
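
/*
 * Usage sketch (editorial illustration; foo_* names are hypothetical): a
 * peripheral driver's probe registering a second chip select used only
 * for firmware upload:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *		return foo_upload_firmware(fw_spi);	// hypothetical helper
 *	}
 */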
2612
2613 #ifdef CONFIG_ACPI
2614 struct acpi_spi_lookup {
2615 struct spi_controller *ctlr;
2616 u32 max_speed_hz;
2617 u32 mode;
2618 int irq;
2619 u8 bits_per_word;
2620 u8 chip_select;
2621 int n;
2622 int index;
2623 };
2624
2625 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2626 {
2627 struct acpi_resource_spi_serialbus *sb;
2628 int *count = data;
2629
2630 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2631 return 1;
2632
2633 sb = &ares->data.spi_serial_bus;
2634 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2635 return 1;
2636
2637 *count = *count + 1;
2638
2639 return 1;
2640 }
2641
2642 /**
2643 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2644 * @adev: ACPI device
2645 *
2646  * Return: the number of SpiSerialBus resources in the ACPI device's
2647  * resource list, or a negative error code.
2648 */
2649 int acpi_spi_count_resources(struct acpi_device *adev)
2650 {
2651 LIST_HEAD(r);
2652 int count = 0;
2653 int ret;
2654
2655 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2656 if (ret < 0)
2657 return ret;
2658
2659 acpi_dev_free_resource_list(&r);
2660
2661 return count;
2662 }
2663 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2664
2665 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2666 struct acpi_spi_lookup *lookup)
2667 {
2668 const union acpi_object *obj;
2669
2670 if (!x86_apple_machine)
2671 return;
2672
2673 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2674 && obj->buffer.length >= 4)
2675 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2676
2677 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2678 && obj->buffer.length == 8)
2679 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2680
2681 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2682 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2683 lookup->mode |= SPI_LSB_FIRST;
2684
2685 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2686 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2687 lookup->mode |= SPI_CPOL;
2688
2689 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2690 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2691 lookup->mode |= SPI_CPHA;
2692 }
2693
2694 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2695 {
2696 struct acpi_spi_lookup *lookup = data;
2697 struct spi_controller *ctlr = lookup->ctlr;
2698
2699 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2700 struct acpi_resource_spi_serialbus *sb;
2701 acpi_handle parent_handle;
2702 acpi_status status;
2703
2704 sb = &ares->data.spi_serial_bus;
2705 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2706
2707 if (lookup->index != -1 && lookup->n++ != lookup->index)
2708 return 1;
2709
2710 status = acpi_get_handle(NULL,
2711 sb->resource_source.string_ptr,
2712 &parent_handle);
2713
2714 if (ACPI_FAILURE(status))
2715 return -ENODEV;
2716
2717 if (ctlr) {
2718 if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2719 return -ENODEV;
2720 } else {
2721 struct acpi_device *adev;
2722
2723 adev = acpi_fetch_acpi_dev(parent_handle);
2724 if (!adev)
2725 return -ENODEV;
2726
2727 ctlr = acpi_spi_find_controller_by_adev(adev);
2728 if (!ctlr)
2729 return -EPROBE_DEFER;
2730
2731 lookup->ctlr = ctlr;
2732 }
2733
2734 /*
2735 * ACPI DeviceSelection numbering is handled by the
2736 * host controller driver in Windows and can vary
2737 * from driver to driver. In Linux we always expect
2738 * 0 .. max - 1 so we need to ask the driver to
2739 * translate between the two schemes.
2740 */
2741 if (ctlr->fw_translate_cs) {
2742 int cs = ctlr->fw_translate_cs(ctlr,
2743 sb->device_selection);
2744 if (cs < 0)
2745 return cs;
2746 lookup->chip_select = cs;
2747 } else {
2748 lookup->chip_select = sb->device_selection;
2749 }
2750
2751 lookup->max_speed_hz = sb->connection_speed;
2752 lookup->bits_per_word = sb->data_bit_length;
2753
2754 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2755 lookup->mode |= SPI_CPHA;
2756 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2757 lookup->mode |= SPI_CPOL;
2758 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2759 lookup->mode |= SPI_CS_HIGH;
2760 }
2761 } else if (lookup->irq < 0) {
2762 struct resource r;
2763
2764 if (acpi_dev_resource_interrupt(ares, 0, &r))
2765 lookup->irq = r.start;
2766 }
2767
2768 /* Always tell the ACPI core to skip this resource */
2769 return 1;
2770 }
2771
2772 /**
2773 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2774 * @ctlr: controller to which the spi device belongs
2775 * @adev: ACPI Device for the spi device
2776 * @index: Index of the spi resource inside the ACPI Node
2777 *
2778  * This should be used to allocate a new SPI device from an ACPI device node.
2779 * The caller is responsible for calling spi_add_device to register the SPI device.
2780 *
2781 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2782 * using the resource.
2783 * If index is set to -1, index is not used.
2784 * Note: If index is -1, ctlr must be set.
2785 *
2786 * Return: a pointer to the new device, or ERR_PTR on error.
2787 */
2788 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2789 struct acpi_device *adev,
2790 int index)
2791 {
2792 acpi_handle parent_handle = NULL;
2793 struct list_head resource_list;
2794 struct acpi_spi_lookup lookup = {};
2795 struct spi_device *spi;
2796 int ret;
2797
2798 if (!ctlr && index == -1)
2799 return ERR_PTR(-EINVAL);
2800
2801 lookup.ctlr = ctlr;
2802 lookup.irq = -1;
2803 lookup.index = index;
2804 lookup.n = 0;
2805
2806 INIT_LIST_HEAD(&resource_list);
2807 ret = acpi_dev_get_resources(adev, &resource_list,
2808 acpi_spi_add_resource, &lookup);
2809 acpi_dev_free_resource_list(&resource_list);
2810
2811 if (ret < 0)
2812 /* Found SPI in _CRS but it points to another controller */
2813 return ERR_PTR(ret);
2814
2815 if (!lookup.max_speed_hz &&
2816 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2817 device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2818 /* Apple does not use _CRS but nested devices for SPI target devices */
2819 acpi_spi_parse_apple_properties(adev, &lookup);
2820 }
2821
2822 if (!lookup.max_speed_hz)
2823 return ERR_PTR(-ENODEV);
2824
2825 spi = spi_alloc_device(lookup.ctlr);
2826 if (!spi) {
2827 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2828 dev_name(&adev->dev));
2829 return ERR_PTR(-ENOMEM);
2830 }
2831
2832 spi_set_all_cs_unused(spi);
2833 spi_set_chipselect(spi, 0, lookup.chip_select);
2834
2835 ACPI_COMPANION_SET(&spi->dev, adev);
2836 spi->max_speed_hz = lookup.max_speed_hz;
2837 spi->mode |= lookup.mode;
2838 spi->irq = lookup.irq;
2839 spi->bits_per_word = lookup.bits_per_word;
2840 /*
2841 * By default spi->chip_select[0] will hold the physical CS number,
2842 * so set bit 0 in spi->cs_index_mask.
2843 */
2844 spi->cs_index_mask = BIT(0);
2845
2846 return spi;
2847 }
2848 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
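
/*
 * Usage sketch (editorial illustration): allocating the SPI device for the
 * second SpiSerialBus resource of an ACPI node, letting the core look up
 * the controller, then registering it:
 *
 *	struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, 1);
 *	int ret;
 *
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 *	return ret;
 */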
2849
2850 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2851 struct acpi_device *adev)
2852 {
2853 struct spi_device *spi;
2854
2855 if (acpi_bus_get_status(adev) || !adev->status.present ||
2856 acpi_device_enumerated(adev))
2857 return AE_OK;
2858
2859 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2860 if (IS_ERR(spi)) {
2861 if (PTR_ERR(spi) == -ENOMEM)
2862 return AE_NO_MEMORY;
2863 else
2864 return AE_OK;
2865 }
2866
2867 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2868 sizeof(spi->modalias));
2869
2870 acpi_device_set_enumerated(adev);
2871
2872 adev->power.flags.ignore_parent = true;
2873 if (spi_add_device(spi)) {
2874 adev->power.flags.ignore_parent = false;
2875 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2876 dev_name(&adev->dev));
2877 spi_dev_put(spi);
2878 }
2879
2880 return AE_OK;
2881 }
2882
2883 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2884 void *data, void **return_value)
2885 {
2886 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2887 struct spi_controller *ctlr = data;
2888
2889 if (!adev)
2890 return AE_OK;
2891
2892 return acpi_register_spi_device(ctlr, adev);
2893 }
2894
2895 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2896
2897 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2898 {
2899 acpi_status status;
2900 acpi_handle handle;
2901
2902 handle = ACPI_HANDLE(ctlr->dev.parent);
2903 if (!handle)
2904 return;
2905
2906 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2907 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2908 acpi_spi_add_device, NULL, ctlr, NULL);
2909 if (ACPI_FAILURE(status))
2910 dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
2911 }
2912 #else
2913 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2914 #endif /* CONFIG_ACPI */
2915
2916 static void spi_controller_release(struct device *dev)
2917 {
2918 struct spi_controller *ctlr;
2919
2920 ctlr = container_of(dev, struct spi_controller, dev);
2921 kfree(ctlr);
2922 }
2923
2924 static const struct class spi_controller_class = {
2925 .name = "spi_master",
2926 .dev_release = spi_controller_release,
2927 .dev_groups = spi_controller_groups,
2928 };
2929
2930 #ifdef CONFIG_SPI_SLAVE
2931 /**
2932 * spi_target_abort - abort the ongoing transfer request on an SPI target controller
2933 * @spi: device used for the current transfer
2934 */
2935 int spi_target_abort(struct spi_device *spi)
2936 {
2937 struct spi_controller *ctlr = spi->controller;
2938
2939 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2940 return ctlr->target_abort(ctlr);
2941
2942 return -ENOTSUPP;
2943 }
2944 EXPORT_SYMBOL_GPL(spi_target_abort);
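
/*
 * Usage sketch (editorial illustration): a target-mode protocol driver may
 * abort a previously submitted request before queueing a new one:
 *
 *	ret = spi_target_abort(spi);
 *	if (ret && ret != -ENOTSUPP)
 *		return ret;
 */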
2945
2946 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2947 char *buf)
2948 {
2949 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2950 dev);
2951 struct device *child;
2952 int ret;
2953
2954 child = device_find_any_child(&ctlr->dev);
2955 ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2956 put_device(child);
2957
2958 return ret;
2959 }
2960
2961 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2962 const char *buf, size_t count)
2963 {
2964 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2965 dev);
2966 struct spi_device *spi;
2967 struct device *child;
2968 char name[32];
2969 int rc;
2970
2971 rc = sscanf(buf, "%31s", name);
2972 if (rc != 1 || !name[0])
2973 return -EINVAL;
2974
2975 child = device_find_any_child(&ctlr->dev);
2976 if (child) {
2977 /* Remove registered target device */
2978 device_unregister(child);
2979 put_device(child);
2980 }
2981
2982 if (strcmp(name, "(null)")) {
2983 /* Register new target device */
2984 spi = spi_alloc_device(ctlr);
2985 if (!spi)
2986 return -ENOMEM;
2987
2988 strscpy(spi->modalias, name, sizeof(spi->modalias));
2989
2990 rc = spi_add_device(spi);
2991 if (rc) {
2992 spi_dev_put(spi);
2993 return rc;
2994 }
2995 }
2996
2997 return count;
2998 }
2999
3000 static DEVICE_ATTR_RW(slave);
3001
3002 static struct attribute *spi_target_attrs[] = {
3003 &dev_attr_slave.attr,
3004 NULL,
3005 };
3006
3007 static const struct attribute_group spi_target_group = {
3008 .attrs = spi_target_attrs,
3009 };
3010
3011 static const struct attribute_group *spi_target_groups[] = {
3012 &spi_controller_statistics_group,
3013 &spi_target_group,
3014 NULL,
3015 };
3016
3017 static const struct class spi_target_class = {
3018 .name = "spi_slave",
3019 .dev_release = spi_controller_release,
3020 .dev_groups = spi_target_groups,
3021 };
3022 #else
3023 extern struct class spi_target_class; /* dummy */
3024 #endif
3025
3026 /**
3027 * __spi_alloc_controller - allocate an SPI host or target controller
3028 * @dev: the controller, possibly using the platform_bus
3029 * @size: how much zeroed driver-private data to allocate; the pointer to this
3030 * memory is in the driver_data field of the returned device, accessible
3031 * with spi_controller_get_devdata(); the memory is cacheline aligned;
3032 * drivers granting DMA access to portions of their private data need to
3033 * round up @size using ALIGN(size, dma_get_cache_alignment()).
3034 * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
3035 * controller
3036 * Context: can sleep
3037 *
3038 * This call is used only by SPI controller drivers, which are the
3039 * only ones directly touching chip registers. It's how they allocate
3040 * an spi_controller structure, prior to calling spi_register_controller().
3041 *
3042 * This must be called from context that can sleep.
3043 *
3044 * The caller is responsible for assigning the bus number and initializing the
3045 * controller's methods before calling spi_register_controller(); and (after
3046 * errors adding the device) calling spi_controller_put() to prevent a memory
3047 * leak.
3048 *
3049 * Return: the SPI controller structure on success, else NULL.
3050 */
3051 struct spi_controller *__spi_alloc_controller(struct device *dev,
3052 unsigned int size, bool target)
3053 {
3054 struct spi_controller *ctlr;
3055 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3056
3057 if (!dev)
3058 return NULL;
3059
3060 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3061 if (!ctlr)
3062 return NULL;
3063
3064 device_initialize(&ctlr->dev);
3065 INIT_LIST_HEAD(&ctlr->queue);
3066 spin_lock_init(&ctlr->queue_lock);
3067 spin_lock_init(&ctlr->bus_lock_spinlock);
3068 mutex_init(&ctlr->bus_lock_mutex);
3069 mutex_init(&ctlr->io_mutex);
3070 mutex_init(&ctlr->add_lock);
3071 ctlr->bus_num = -1;
3072 ctlr->num_chipselect = 1;
3073 ctlr->target = target;
3074 if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
3075 ctlr->dev.class = &spi_target_class;
3076 else
3077 ctlr->dev.class = &spi_controller_class;
3078 ctlr->dev.parent = dev;
3079 pm_suspend_ignore_children(&ctlr->dev, true);
3080 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3081
3082 return ctlr;
3083 }
3084 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
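
/*
 * Usage sketch (editorial illustration; struct foo_spi is hypothetical):
 * drivers normally call this through the spi_alloc_host() or
 * spi_alloc_target() wrappers, with the driver-private data carved out of
 * the same cacheline-aligned allocation:
 *
 *	ctlr = spi_alloc_host(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	fs = spi_controller_get_devdata(ctlr);
 *	fs->ctlr = ctlr;
 */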
3085
3086 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3087 {
3088 spi_controller_put(*(struct spi_controller **)ctlr);
3089 }
3090
3091 /**
3092 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3093 * @dev: physical device of SPI controller
3094 * @size: how much zeroed driver-private data to allocate
3095 * @target: whether to allocate an SPI host (false) or SPI target (true) controller
3096 * Context: can sleep
3097 *
3098 * Allocate an SPI controller and automatically release a reference on it
3099 * when @dev is unbound from its driver. Drivers are thus relieved from
3100 * having to call spi_controller_put().
3101 *
3102 * The arguments to this function are identical to __spi_alloc_controller().
3103 *
3104 * Return: the SPI controller structure on success, else NULL.
3105 */
3106 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3107 unsigned int size,
3108 bool target)
3109 {
3110 struct spi_controller **ptr, *ctlr;
3111
3112 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3113 GFP_KERNEL);
3114 if (!ptr)
3115 return NULL;
3116
3117 ctlr = __spi_alloc_controller(dev, size, target);
3118 if (ctlr) {
3119 ctlr->devm_allocated = true;
3120 *ptr = ctlr;
3121 devres_add(dev, ptr);
3122 } else {
3123 devres_free(ptr);
3124 }
3125
3126 return ctlr;
3127 }
3128 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3129
3130 /**
3131 * spi_get_gpio_descs() - grab chip select GPIOs for the controller
3132 * @ctlr: The SPI controller to grab GPIO descriptors for
3133 */
3134 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3135 {
3136 int nb, i;
3137 struct gpio_desc **cs;
3138 struct device *dev = &ctlr->dev;
3139 unsigned long native_cs_mask = 0;
3140 unsigned int num_cs_gpios = 0;
3141
3142 nb = gpiod_count(dev, "cs");
3143 if (nb < 0) {
3144 /* No GPIOs at all is fine, else return the error */
3145 if (nb == -ENOENT)
3146 return 0;
3147 return nb;
3148 }
3149
3150 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3151
3152 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3153 GFP_KERNEL);
3154 if (!cs)
3155 return -ENOMEM;
3156 ctlr->cs_gpiods = cs;
3157
3158 for (i = 0; i < nb; i++) {
3159 /*
3160 * Most chipselects are active low, the inverted
3161 * semantics are handled by special quirks in gpiolib,
3162 * so initializing them GPIOD_OUT_LOW here means
3163 * "unasserted", in most cases this will drive the physical
3164 * line high.
3165 */
3166 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3167 GPIOD_OUT_LOW);
3168 if (IS_ERR(cs[i]))
3169 return PTR_ERR(cs[i]);
3170
3171 if (cs[i]) {
3172 /*
3173 * If we find a CS GPIO, name it after the device and
3174 * chip select line.
3175 */
3176 char *gpioname;
3177
3178 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3179 dev_name(dev), i);
3180 if (!gpioname)
3181 return -ENOMEM;
3182 gpiod_set_consumer_name(cs[i], gpioname);
3183 num_cs_gpios++;
3184 continue;
3185 }
3186
3187 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3188 dev_err(dev, "Invalid native chip select %d\n", i);
3189 return -EINVAL;
3190 }
3191 native_cs_mask |= BIT(i);
3192 }
3193
3194 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3195
3196 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3197 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3198 dev_err(dev, "No unused native chip select available\n");
3199 return -EINVAL;
3200 }
3201
3202 return 0;
3203 }
3204
3205 static int spi_controller_check_ops(struct spi_controller *ctlr)
3206 {
3207 /*
3208 	 * The controller may implement only the high-level SPI-memory-like
3209 	 * operations if it does not support regular SPI transfers, and this is
3210 	 * a valid use case.
3211 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3212 	 * one of the ->transfer_xxx() methods be implemented.
3213 */
3214 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3215 if (!ctlr->transfer && !ctlr->transfer_one &&
3216 !ctlr->transfer_one_message) {
3217 return -EINVAL;
3218 }
3219 }
3220
3221 return 0;
3222 }
3223
3224 /* Allocate dynamic bus number using Linux idr */
3225 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3226 {
3227 int id;
3228
3229 mutex_lock(&board_lock);
3230 id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
3231 mutex_unlock(&board_lock);
3232 if (WARN(id < 0, "couldn't get idr"))
3233 return id == -ENOSPC ? -EBUSY : id;
3234 ctlr->bus_num = id;
3235 return 0;
3236 }
3237
3238 /**
3239 * spi_register_controller - register SPI host or target controller
3240 * @ctlr: initialized controller, originally from spi_alloc_host() or
3241 * spi_alloc_target()
3242 * Context: can sleep
3243 *
3244 * SPI controllers connect to their drivers using some non-SPI bus,
3245 * such as the platform bus. The final stage of probe() in that code
3246 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3247 *
3248 * SPI controllers use board specific (often SOC specific) bus numbers,
3249 * and board-specific addressing for SPI devices combines those numbers
3250 * with chip select numbers. Since SPI does not directly support dynamic
3251 * device identification, boards need configuration tables telling which
3252 * chip is at which address.
3253 *
3254 * This must be called from context that can sleep. It returns zero on
3255 * success, else a negative error code (dropping the controller's refcount).
3256 * After a successful return, the caller is responsible for calling
3257 * spi_unregister_controller().
3258 *
3259 * Return: zero on success, else a negative error code.
3260 */
3261 int spi_register_controller(struct spi_controller *ctlr)
3262 {
3263 struct device *dev = ctlr->dev.parent;
3264 struct boardinfo *bi;
3265 int first_dynamic;
3266 int status;
3267 int idx;
3268
3269 if (!dev)
3270 return -ENODEV;
3271
3272 /*
3273 * Make sure all necessary hooks are implemented before registering
3274 * the SPI controller.
3275 */
3276 status = spi_controller_check_ops(ctlr);
3277 if (status)
3278 return status;
3279
3280 if (ctlr->bus_num < 0)
3281 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3282 if (ctlr->bus_num >= 0) {
3283 /* Devices with a fixed bus num must check-in with the num */
3284 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3285 if (status)
3286 return status;
3287 }
3288 if (ctlr->bus_num < 0) {
3289 first_dynamic = of_alias_get_highest_id("spi");
3290 if (first_dynamic < 0)
3291 first_dynamic = 0;
3292 else
3293 first_dynamic++;
3294
3295 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3296 if (status)
3297 return status;
3298 }
3299 ctlr->bus_lock_flag = 0;
3300 init_completion(&ctlr->xfer_completion);
3301 init_completion(&ctlr->cur_msg_completion);
3302 if (!ctlr->max_dma_len)
3303 ctlr->max_dma_len = INT_MAX;
3304
3305 /*
3306 * Register the device, then userspace will see it.
3307 * Registration fails if the bus ID is in use.
3308 */
3309 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3310
3311 if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3312 status = spi_get_gpio_descs(ctlr);
3313 if (status)
3314 goto free_bus_id;
3315 /*
3316 * A controller using GPIO descriptors always
3317 * supports SPI_CS_HIGH if need be.
3318 */
3319 ctlr->mode_bits |= SPI_CS_HIGH;
3320 }
3321
3322 /*
3323 * Even if it's just one always-selected device, there must
3324 * be at least one chipselect.
3325 */
3326 if (!ctlr->num_chipselect) {
3327 status = -EINVAL;
3328 goto free_bus_id;
3329 }
3330
3331 /* Setting last_cs to SPI_INVALID_CS means no chip selected */
3332 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3333 ctlr->last_cs[idx] = SPI_INVALID_CS;
3334
3335 status = device_add(&ctlr->dev);
3336 if (status < 0)
3337 goto free_bus_id;
3338 dev_dbg(dev, "registered %s %s\n",
3339 spi_controller_is_target(ctlr) ? "target" : "host",
3340 dev_name(&ctlr->dev));
3341
3342 /*
3343 * If we're using a queued driver, start the queue. Note that we don't
3344 	 * need the queueing logic if the driver only supports high-level
3345 * memory operations.
3346 */
3347 if (ctlr->transfer) {
3348 dev_info(dev, "controller is unqueued, this is deprecated\n");
3349 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3350 status = spi_controller_initialize_queue(ctlr);
3351 if (status) {
3352 device_del(&ctlr->dev);
3353 goto free_bus_id;
3354 }
3355 }
3356 /* Add statistics */
3357 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3358 if (!ctlr->pcpu_statistics) {
3359 dev_err(dev, "Error allocating per-cpu statistics\n");
3360 status = -ENOMEM;
3361 goto destroy_queue;
3362 }
3363
3364 mutex_lock(&board_lock);
3365 list_add_tail(&ctlr->list, &spi_controller_list);
3366 list_for_each_entry(bi, &board_list, list)
3367 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3368 mutex_unlock(&board_lock);
3369
3370 /* Register devices from the device tree and ACPI */
3371 of_register_spi_devices(ctlr);
3372 acpi_register_spi_devices(ctlr);
3373 return status;
3374
3375 destroy_queue:
3376 spi_destroy_queue(ctlr);
3377 free_bus_id:
3378 mutex_lock(&board_lock);
3379 idr_remove(&spi_controller_idr, ctlr->bus_num);
3380 mutex_unlock(&board_lock);
3381 return status;
3382 }
3383 EXPORT_SYMBOL_GPL(spi_register_controller);
3384
3385 static void devm_spi_unregister(struct device *dev, void *res)
3386 {
3387 spi_unregister_controller(*(struct spi_controller **)res);
3388 }
3389
3390 /**
3391 * devm_spi_register_controller - register managed SPI host or target controller
3392 * @dev: device managing SPI controller
3393 * @ctlr: initialized controller, originally from spi_alloc_host() or
3394 * spi_alloc_target()
3395 * Context: can sleep
3396 *
3397  * Register an SPI controller as with spi_register_controller(); it will
3398  * automatically be unregistered and freed when @dev is unbound.
3399 *
3400 * Return: zero on success, else a negative error code.
3401 */
3402 int devm_spi_register_controller(struct device *dev,
3403 struct spi_controller *ctlr)
3404 {
3405 struct spi_controller **ptr;
3406 int ret;
3407
3408 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3409 if (!ptr)
3410 return -ENOMEM;
3411
3412 ret = spi_register_controller(ctlr);
3413 if (!ret) {
3414 *ptr = ctlr;
3415 devres_add(dev, ptr);
3416 } else {
3417 devres_free(ptr);
3418 }
3419
3420 return ret;
3421 }
3422 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
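
/*
 * Usage sketch (editorial illustration; foo_* names are hypothetical): the
 * usual devm pairing in a platform driver probe, so neither
 * spi_controller_put() nor spi_unregister_controller() is needed in the
 * error or remove paths:
 *
 *	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->num_chipselect = 4;
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */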
3423
3424 static int __unregister(struct device *dev, void *null)
3425 {
3426 spi_unregister_device(to_spi_device(dev));
3427 return 0;
3428 }
3429
3430 /**
3431 * spi_unregister_controller - unregister SPI host or target controller
3432 * @ctlr: the controller being unregistered
3433 * Context: can sleep
3434 *
3435 * This call is used only by SPI controller drivers, which are the
3436 * only ones directly touching chip registers.
3437 *
3438 * This must be called from context that can sleep.
3439 *
3440 * Note that this function also drops a reference to the controller.
3441 */
3442 void spi_unregister_controller(struct spi_controller *ctlr)
3443 {
3444 struct spi_controller *found;
3445 int id = ctlr->bus_num;
3446
3447 /* Prevent addition of new devices, unregister existing ones */
3448 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3449 mutex_lock(&ctlr->add_lock);
3450
3451 device_for_each_child(&ctlr->dev, NULL, __unregister);
3452
3453 /* First make sure that this controller was ever added */
3454 mutex_lock(&board_lock);
3455 found = idr_find(&spi_controller_idr, id);
3456 mutex_unlock(&board_lock);
3457 if (ctlr->queued) {
3458 if (spi_destroy_queue(ctlr))
3459 dev_err(&ctlr->dev, "queue remove failed\n");
3460 }
3461 mutex_lock(&board_lock);
3462 list_del(&ctlr->list);
3463 mutex_unlock(&board_lock);
3464
3465 device_del(&ctlr->dev);
3466
3467 /* Free bus id */
3468 mutex_lock(&board_lock);
3469 if (found == ctlr)
3470 idr_remove(&spi_controller_idr, id);
3471 mutex_unlock(&board_lock);
3472
3473 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3474 mutex_unlock(&ctlr->add_lock);
3475
3476 /*
3477 * Release the last reference on the controller if its driver
3478 * has not yet been converted to devm_spi_alloc_host/target().
3479 */
3480 if (!ctlr->devm_allocated)
3481 put_device(&ctlr->dev);
3482 }
3483 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3484
3485 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3486 {
3487 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3488 }
3489
3490 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3491 {
3492 mutex_lock(&ctlr->bus_lock_mutex);
3493 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3494 mutex_unlock(&ctlr->bus_lock_mutex);
3495 }
3496
3497 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3498 {
3499 mutex_lock(&ctlr->bus_lock_mutex);
3500 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3501 mutex_unlock(&ctlr->bus_lock_mutex);
3502 }
3503
3504 int spi_controller_suspend(struct spi_controller *ctlr)
3505 {
3506 int ret = 0;
3507
3508 /* Basically no-ops for non-queued controllers */
3509 if (ctlr->queued) {
3510 ret = spi_stop_queue(ctlr);
3511 if (ret)
3512 dev_err(&ctlr->dev, "queue stop failed\n");
3513 }
3514
3515 __spi_mark_suspended(ctlr);
3516 return ret;
3517 }
3518 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3519
3520 int spi_controller_resume(struct spi_controller *ctlr)
3521 {
3522 int ret = 0;
3523
3524 __spi_mark_resumed(ctlr);
3525
3526 if (ctlr->queued) {
3527 ret = spi_start_queue(ctlr);
3528 if (ret)
3529 dev_err(&ctlr->dev, "queue restart failed\n");
3530 }
3531 return ret;
3532 }
3533 EXPORT_SYMBOL_GPL(spi_controller_resume);
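
/*
 * Usage sketch (editorial illustration; foo_* names are hypothetical):
 * controller drivers typically wrap these calls in their dev_pm_ops so the
 * queue is quiesced before clocks are gated and restarted after they are
 * re-enabled:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret = spi_controller_suspend(ctlr);
 *
 *		if (ret)
 *			return ret;
 *		clk_disable_unprepare(foo_clk(ctlr));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret = clk_prepare_enable(foo_clk(ctlr));
 *
 *		if (ret)
 *			return ret;
 *		return spi_controller_resume(ctlr);
 *	}
 */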

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* Call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* Insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* Remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register the change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Return: pointer to the new struct spi_replaced_transfers,
 *         or PTR_ERR(...) in case of errors.
 */
static struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* Allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* The release code to invoke before running the generic release */
	rxfer->release = release;

	/* Assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* Init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* Remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers,
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* Insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* Free the spi_replaced_transfers structure... */
			spi_res_free(rxfer);

			/* ...and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copies of the given xfer with identical settings,
	 * based on the first transfer to get removed.
	 */
	for (i = 0; i < insert; i++) {
		/* We need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* Copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* Add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* Clear cs_change and delay for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay.value = 0;
		}
	}

	/* Set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and DMA */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum length, in bytes, that any individual transfer
 *           may have after splitting
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase, so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
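
/*
 * Example (illustrative sketch, not built here): a controller driver whose
 * hardware FIFO caps transfers at 256 bytes can split oversized transfers
 * from its optimize_message callback. The "example_" name and the 256-byte
 * limit are hypothetical.
 */
#if 0
static int example_optimize_message(struct spi_message *msg)
{
	/*
	 * The split allocates spi_res resources, so it is undone
	 * automatically when the message is unoptimized.
	 */
	return spi_split_transfers_maxsize(msg->spi->controller, msg, 256);
}
#endif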

/**
 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
 *                                when an individual transfer exceeds a
 *                                certain number of SPI words
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxwords: the number of words to limit each transfer to
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase, so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
				 struct spi_message *msg,
				 size_t maxwords)
{
	struct spi_transfer *xfer;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		size_t maxsize;
		int ret;

		maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);

/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_set_cs_timing(struct spi_device *spi)
{
	struct device *parent = spi->controller->dev.parent;
	int status = 0;

	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
		if (spi->controller->auto_runtime_pm) {
			status = pm_runtime_get_sync(parent);
			if (status < 0) {
				pm_runtime_put_noidle(parent);
				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
					status);
				return status;
			}

			status = spi->controller->set_cs_timing(spi);
			pm_runtime_mark_last_busy(parent);
			pm_runtime_put_autosuspend(parent);
		} else {
			status = spi->controller->set_cs_timing(spi);
		}
	}
	return status;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the SPI device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/*
	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * from being set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/* Check against conflicting MOSI idle configuration */
	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
		dev_err(&spi->dev,
			"setup: MOSI configured to idle low and high at the same time.\n");
		return -EINVAL;
	}
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word) {
		spi->bits_per_word = 8;
	} else {
		/*
		 * Some controllers may not support the default 8 bits-per-word,
		 * so only perform the check when this is explicitly provided.
		 */
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
		if (status)
			return status;
	}

	if (spi->controller->max_speed_hz &&
	    (!spi->max_speed_hz ||
	     spi->max_speed_hz > spi->controller->max_speed_hz))
		spi->max_speed_hz = spi->controller->max_speed_hz;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup) {
		status = spi->controller->setup(spi);
		if (status) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
				status);
			return status;
		}
	}

	status = spi_set_cs_timing(spi);
	if (status) {
		mutex_unlock(&spi->controller->io_mutex);
		return status;
	}

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return a positive value from pm_runtime_get:
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
		status = 0;

		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
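
/*
 * Example (illustrative sketch, not built here): a peripheral driver's probe
 * adjusting mode, word size and clock before its first transfer. The
 * "example_" name and the chosen values are hypothetical.
 */
#if 0
static int example_peripheral_probe(struct spi_device *spi)
{
	/* Request mode 3, 16-bit words and at most 10 MHz */
	spi->mode |= SPI_CPOL | SPI_CPHA;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 10 * 1000 * 1000;

	/* Fails cleanly if the controller can't honour these options */
	return spi_setup(spi);
}
#endif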

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	message->spi = spi;

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed to the spi device default
	 * if they are not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits to the single transfer default
	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * DDR mode is supported only if the controller has dtr_caps
		 * set; otherwise SDR mode is assumed for both SPI and QSPI
		 * controllers. Note: this is applicable only to QSPI
		 * controllers.
		 */
		if (xfer->dtr_mode && !ctlr->dtr_caps)
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD &&
			    xfer->tx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
			    !(spi->mode & SPI_TX_OCTAL))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD &&
			    xfer->rx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
			    !(spi->mode & SPI_RX_OCTAL))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;

		/* Make sure the controller supports the required offload features. */
		if (xfer->offload_flags) {
			if (!message->offload)
				return -EINVAL;

			if (xfer->offload_flags & ~message->offload->xfer_flags)
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

/*
 * spi_split_transfers - generic handling of transfer splitting
 * @msg: the message to split
 *
 * Under certain conditions, a SPI controller may not support arbitrary
 * transfer sizes or other features required by a peripheral. This function
 * will split the transfers in the message into smaller transfers that are
 * supported by the controller.
 *
 * Controllers with special requirements not covered here can also split
 * transfers in the optimize_message() callback.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
static int spi_split_transfers(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;
	struct spi_transfer *xfer;
	int ret;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((msg->spi->mode & SPI_CS_WORD) &&
	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &msg->transfers))
				break;

			xfer->cs_change = 1;
		}
	} else {
		ret = spi_split_transfers_maxsize(ctlr, msg,
						  spi_max_transfer_size(msg->spi));
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * __spi_optimize_message - shared implementation for spi_optimize_message()
 *                          and spi_maybe_optimize_message()
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers will call spi_optimize_message() and the spi core will
 * call spi_maybe_optimize_message() instead of calling this directly.
 *
 * It is not valid to call this on a message that has already been optimized.
 *
 * Return: zero on success, else a negative error code
 */
static int __spi_optimize_message(struct spi_device *spi,
				  struct spi_message *msg)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	ret = __spi_validate(spi, msg);
	if (ret)
		return ret;

	ret = spi_split_transfers(msg);
	if (ret)
		return ret;

	if (ctlr->optimize_message) {
		ret = ctlr->optimize_message(msg);
		if (ret) {
			spi_res_release(ctlr, msg);
			return ret;
		}
	}

	msg->optimized = true;

	return 0;
}

/*
 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 */
static int spi_maybe_optimize_message(struct spi_device *spi,
				      struct spi_message *msg)
{
	if (spi->controller->defer_optimize_message) {
		msg->spi = spi;
		return 0;
	}

	if (msg->pre_optimized)
		return 0;

	return __spi_optimize_message(spi, msg);
}

/**
 * spi_optimize_message - do any one-time validation and setup for a SPI message
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers that reuse the same message repeatedly may call this to
 * perform as much message prep as possible once, rather than repeating it each
 * time a message transfer is performed, improving throughput and reducing CPU
 * usage.
 *
 * Once a message has been optimized, it cannot be modified with the exception
 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
 * only the data in the memory it points to).
 *
 * Calls to this function must be balanced with calls to spi_unoptimize_message()
 * to avoid leaking resources.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
{
	int ret;

	/*
	 * Pre-optimization is not supported and optimization is deferred,
	 * e.g. when using spi-mux.
	 */
	if (spi->controller->defer_optimize_message)
		return 0;

	ret = __spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	/*
	 * This flag indicates that the peripheral driver called spi_optimize_message()
	 * and therefore we shouldn't unoptimize the message automatically when finalizing
	 * the message but rather wait until spi_unoptimize_message() is called
	 * by the peripheral driver.
	 */
	msg->pre_optimized = true;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_optimize_message);
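
/*
 * Example (illustrative sketch, not built here): pre-optimizing a message
 * that is transferred many times with the same layout, so validation and
 * splitting happen once instead of on every spi_sync(). The "example_"
 * names are hypothetical.
 */
#if 0
static int example_stream_samples(struct spi_device *spi, void *buf,
				  size_t len, unsigned int iterations)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = len,
	};
	struct spi_message msg;
	unsigned int i;
	int ret;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	ret = spi_optimize_message(spi, &msg);
	if (ret)
		return ret;

	for (i = 0; i < iterations && !ret; i++)
		ret = spi_sync(spi, &msg);

	/* Balance spi_optimize_message() to release its resources */
	spi_unoptimize_message(&msg);
	return ret;
}
#endif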

/**
 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
 * @msg: the message to unoptimize
 *
 * Calls to this function must be balanced with calls to spi_optimize_message().
 *
 * Context: can sleep
 */
void spi_unoptimize_message(struct spi_message *msg)
{
	if (msg->spi->controller->defer_optimize_message)
		return;

	__spi_unoptimize_message(msg);
	msg->pre_optimized = false;
}
EXPORT_SYMBOL_GPL(spi_unoptimize_message);

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

static void devm_spi_unoptimize_message(void *msg)
{
	spi_unoptimize_message(msg);
}

/**
 * devm_spi_optimize_message - managed version of spi_optimize_message()
 * @dev: the device that manages @msg (usually @spi->dev)
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 *
 * spi_unoptimize_message() will automatically be called when the device is
 * removed.
 */
int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
			      struct spi_message *msg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
}
EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
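
/*
 * Example (illustrative sketch, not built here): the devm variant ties the
 * unoptimize step to driver unbind, so probe code needs no explicit cleanup.
 * The "example_" name is hypothetical and assumes a message that lives in
 * driver state for the lifetime of the binding.
 */
#if 0
static int example_probe_optimize(struct spi_device *spi,
				  struct spi_message *msg)
{
	/* msg is released via spi_unoptimize_message() at unbind time */
	return devm_spi_optimize_message(&spi->dev, spi, msg);
}
#endif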

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used in IRQ context and other contexts which can't
 * sleep, as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = spi_maybe_optimize_message(spi, message);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
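
/*
 * Example (illustrative sketch, not built here): submitting a message from a
 * context that cannot sleep and finishing the work from the completion
 * callback. The "example_" names are hypothetical.
 */
#if 0
static void example_rx_complete(void *context)
{
	struct completion *done = context;

	/* Runs in a context that can't sleep; message->status is now valid */
	complete(done);
}

static int example_submit_async(struct spi_device *spi,
				struct spi_message *msg,
				struct completion *done)
{
	msg->complete = example_rx_complete;
	msg->context = done;

	/* Queues the message; buffers must stay valid until completion */
	return spi_async(spi, msg);
}
#endif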

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
		return -ESHUTDOWN;
	}

	status = spi_maybe_optimize_message(spi, message);
	if (status)
		return status;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until
	 * it is completed.
	 */
	message->complete = spi_complete;
	message->context = &done;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	status = __spi_async(spi, message);
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->complete = NULL;
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
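
/*
 * Example (illustrative sketch, not built here): a full-duplex transfer built
 * from one spi_transfer and sent synchronously. The "example_" name is
 * hypothetical; both buffers must remain valid until spi_sync() returns.
 */
#if 0
static int example_full_duplex(struct spi_device *spi,
			       const u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* Blocks until the controller has finished the message */
	return spi_sync(spi, &msg);
}
#endif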

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
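
/*
 * Example (illustrative sketch, not built here): performing two messages that
 * must not be interleaved with traffic from other devices on the same bus.
 * The "example_" names are hypothetical.
 */
#if 0
static int example_atomic_pair(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	/* Other spi_sync() callers block and spi_async() gets -EBUSY */
	spi_bus_lock(ctlr);

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(ctlr);
	return ret;
}
#endif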

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
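
/*
 * Example (illustrative sketch, not built here): reading a 1-byte register
 * from a device whose protocol is "send one command byte, then read one data
 * byte". The "example_" name and the command layout are hypothetical.
 */
#if 0
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	/* Neither buffer needs to be DMA-safe; the core copies both */
	return spi_write_then_read(spi, &reg, 1, val, 1);
}
#endif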

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* SPI controllers are not on the spi_bus, so we find them a different way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_controller_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_target_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return device_match_acpi_dev(dev->parent, data);
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_controller_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_target_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_controller_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_target_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_controller_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);