Lines Matching +full:spi-lsb-first
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
9 #include <linux/clk/clk-conf.h>
13 #include <linux/dma-mapping.h>
34 #include <linux/spi/offload/types.h>
35 #include <linux/spi/spi.h>
36 #include <linux/spi/spi-mem.h>
40 #include <trace/events/spi.h>
50 struct spi_device *spi = to_spi_device(dev);
52 spi_controller_put(spi->controller);
53 kfree(spi->driver_override);
54 free_percpu(spi->pcpu_statistics);
55 kfree(spi);
61 const struct spi_device *spi = to_spi_device(dev);
64 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
65 if (len != -ENODEV)
68 return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
76 struct spi_device *spi = to_spi_device(dev);
79 ret = driver_set_override(dev, &spi->driver_override, buf, count);
89 const struct spi_device *spi = to_spi_device(dev);
93 len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
115 u64_stats_init(&stat->syncp);
136 start = u64_stats_fetch_begin(&pcpu_stats->syncp);
138 } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
151 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
161 struct spi_device *spi = to_spi_device(dev); \
162 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
214 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
318 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
326 u64_stats_update_begin(&stats->syncp);
328 u64_stats_inc(&stats->transfers);
329 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
331 u64_stats_add(&stats->bytes, xfer->len);
333 u64_stats_add(&stats->bytes_tx, xfer->len);
335 u64_stats_add(&stats->bytes_rx, xfer->len);
337 u64_stats_update_end(&stats->syncp);
342 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
347 while (id->name[0]) {
348 if (!strcmp(name, id->name))
357 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
359 return spi_match_id(sdrv->id_table, sdev->modalias);
367 match = device_get_match_data(&sdev->dev);
371 return (const void *)spi_get_device_id(sdev)->driver_data;
377 const struct spi_device *spi = to_spi_device(dev);
380 /* Check override first, and if set, only use the named driver */
381 if (spi->driver_override)
382 return strcmp(spi->driver_override, drv->name) == 0;
392 if (sdrv->id_table)
393 return !!spi_match_id(sdrv->id_table, spi->modalias);
395 return strcmp(spi->modalias, drv->name) == 0;
400 const struct spi_device *spi = to_spi_device(dev);
404 if (rc != -ENODEV)
407 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
412 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
413 struct spi_device *spi = to_spi_device(dev);
417 ret = of_clk_set_defaults(dev->of_node, false);
422 spi->irq = of_irq_get(dev->of_node, 0);
423 else if (is_acpi_device_node(fwnode) && spi->irq < 0)
424 spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
425 if (spi->irq == -EPROBE_DEFER)
426 return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
427 if (spi->irq < 0)
428 spi->irq = 0;
435 if (sdrv->probe)
436 ret = sdrv->probe(spi);
443 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
445 if (sdrv->remove)
446 sdrv->remove(to_spi_device(dev));
451 if (dev->driver) {
452 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
454 if (sdrv->shutdown)
455 sdrv->shutdown(to_spi_device(dev));
460 .name = "spi",
471 * __spi_register_driver - register a SPI driver
480 sdrv->driver.owner = owner;
481 sdrv->driver.bus = &spi_bus_type;
484 * For Really Good Reasons we use spi: modaliases not of:
488 if (sdrv->driver.of_match_table) {
491 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
496 of_name = strnchr(of_id->compatible,
497 sizeof(of_id->compatible), ',');
501 of_name = of_id->compatible;
503 if (sdrv->id_table) {
506 spi_id = spi_match_id(sdrv->id_table, of_name);
510 if (strcmp(sdrv->driver.name, of_name) == 0)
514 pr_warn("SPI driver %s has no spi_device_id for %s\n",
515 sdrv->driver.name, of_id->compatible);
519 return driver_register(&sdrv->driver);
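/*
 * Example (editor's illustrative sketch, not part of this file): a minimal
 * peripheral driver registered through this path via module_spi_driver();
 * the "foo" names are hypothetical.
 */
static const struct spi_device_id foo_spi_ids[] = {
	{ "foo-chip" },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static int foo_probe(struct spi_device *spi)
{
	return 0;	/* A real driver would configure the device here */
}

static struct spi_driver foo_driver = {
	.driver		= { .name = "foo" },
	.id_table	= foo_spi_ids,
	.probe		= foo_probe,
};
module_spi_driver(foo_driver);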
523 /*-------------------------------------------------------------------------*/
526 * SPI devices should normally not be created by SPI device drivers; that
527 * would make them board-specific. Similarly with SPI controller drivers.
528  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
548 * spi_alloc_device - Allocate a new SPI device
558 * spi_device structure to add it to the SPI controller. If the caller
566 struct spi_device *spi;
571 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
572 if (!spi) {
577 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
578 if (!spi->pcpu_statistics) {
579 kfree(spi);
584 spi->controller = ctlr;
585 spi->dev.parent = &ctlr->dev;
586 spi->dev.bus = &spi_bus_type;
587 spi->dev.release = spidev_release;
588 spi->mode = ctlr->buswidth_override_bits;
589 spi->num_chipselect = 1;
591 device_initialize(&spi->dev);
592 return spi;
596 static void spi_dev_set_name(struct spi_device *spi)
598 struct device *dev = &spi->dev;
602 dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
607 dev_set_name(dev, "spi-%pfwP", fwnode);
611 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
612 spi_get_chipselect(spi, 0));
617 * logical CS in the spi->chip_select[]. If all the physical CS
620  * CS can be 0. As a solution to this issue, initialize all the CS to -1.
621 * Now all the unused logical CS will have -1 physical CS value & can be
624 #define SPI_INVALID_CS ((s8)-1)
627 struct spi_device *spi, u8 idx,
633 cs = spi_get_chipselect(spi, idx);
634 for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) {
638 return -EBUSY;
646 struct spi_device *spi = to_spi_device(dev);
650 if (spi->controller == new_spi->controller) {
651 for (idx = 0; idx < spi->num_chipselect; idx++) {
652 status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
660 static void spi_cleanup(struct spi_device *spi)
662 if (spi->controller->cleanup)
663 spi->controller->cleanup(spi);
666 static int __spi_add_device(struct spi_device *spi)
668 struct spi_controller *ctlr = spi->controller;
669 struct device *dev = ctlr->dev.parent;
673 if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) {
674 dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect,
676 return -EOVERFLOW;
679 for (idx = 0; idx < spi->num_chipselect; idx++) {
681 cs = spi_get_chipselect(spi, idx);
682 if (cs >= ctlr->num_chipselect) {
683 dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
684 ctlr->num_chipselect);
685 return -EINVAL;
691 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
694 for (idx = 0; idx < spi->num_chipselect; idx++) {
695 status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
702 for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
703 spi_set_chipselect(spi, idx, SPI_INVALID_CS);
706 spi_dev_set_name(spi);
713 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
719 !device_is_registered(&ctlr->dev)) {
720 return -ENODEV;
723 if (ctlr->cs_gpiods) {
726 for (idx = 0; idx < spi->num_chipselect; idx++) {
727 cs = spi_get_chipselect(spi, idx);
728 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
737 status = spi_setup(spi);
740 dev_name(&spi->dev), status);
745 status = device_add(&spi->dev);
748 dev_name(&spi->dev), status);
749 spi_cleanup(spi);
751 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
758 * spi_add_device - Add spi_device allocated with spi_alloc_device
759 * @spi: spi_device to register
762 * spi_alloc_device can be added onto the SPI bus with this function.
766 int spi_add_device(struct spi_device *spi)
768 struct spi_controller *ctlr = spi->controller;
772 spi_dev_set_name(spi);
774 mutex_lock(&ctlr->add_lock);
775 status = __spi_add_device(spi);
776 mutex_unlock(&ctlr->add_lock);
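/*
 * Example (illustrative sketch): the two-step allocate/add flow as seen by
 * code that enumerates devices by hand; "foo-chip" and the chosen chip
 * select are hypothetical.
 */
static int foo_add_one(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	int ret;

	spi = spi_alloc_device(ctlr);
	if (!spi)
		return -ENOMEM;

	strscpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
	spi_set_chipselect(spi, 0, 0);		/* physical CS 0 */
	spi->max_speed_hz = 1 * 1000 * 1000;

	ret = spi_add_device(spi);
	if (ret)
		spi_dev_put(spi);	/* still ours on failure */
	return ret;
}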
782 * spi_new_device - instantiate one new SPI device
784 * @chip: Describes the SPI device
788 * after board init creates the hard-wired devices. Some development
791 * driver could add devices (which it would learn about out-of-band).
802 * NOTE: caller did any chip->bus_num checks necessary.
805 * error-or-pointer (not NULL-or-pointer), troubleshootability
813 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
815 /* Use provided chip-select for proxy device */
816 spi_set_chipselect(proxy, 0, chip->chip_select);
818 proxy->max_speed_hz = chip->max_speed_hz;
819 proxy->mode = chip->mode;
820 proxy->irq = chip->irq;
821 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
822 proxy->dev.platform_data = (void *) chip->platform_data;
823 proxy->controller_data = chip->controller_data;
824 proxy->controller_state = NULL;
826 * By default spi->chip_select[0] will hold the physical CS number,
827 * so set bit 0 in spi->cs_index_mask.
829 proxy->cs_index_mask = BIT(0);
831 if (chip->swnode) {
832 status = device_add_software_node(&proxy->dev, chip->swnode);
834 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
835 chip->modalias, status);
847 device_remove_software_node(&proxy->dev);
854 * spi_unregister_device - unregister a single SPI device
855 * @spi: spi_device to unregister
857 * Start making the passed SPI device vanish. Normally this would be handled
860 void spi_unregister_device(struct spi_device *spi)
864 if (!spi)
867 fwnode = dev_fwnode(&spi->dev);
874 device_remove_software_node(&spi->dev);
875 device_del(&spi->dev);
876 spi_cleanup(spi);
877 put_device(&spi->dev);
886 if (ctlr->bus_num != bi->bus_num)
891 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
892 bi->modalias);
896 * spi_register_board_info - register SPI devices for a given board
901 * Board-specific early init code calls this (probably during arch_initcall)
902 * with segments of the SPI device table. Any device nodes are created later,
903 * after the relevant parent SPI controller (bus_num) is defined. We keep
905 * not make Linux forget about these hard-wired devices.
907 * Other code can also call this, e.g. a particular add-on board might provide
908 * SPI devices through its expansion connector, so code initializing that board
909 * would naturally declare its SPI devices.
912 * any embedded pointers (platform_data, etc), they're copied as-is.
926 return -ENOMEM;
931 memcpy(&bi->board_info, info, sizeof(*info));
934 list_add_tail(&bi->list, &board_list);
937 &bi->board_info);
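/*
 * Example (illustrative sketch): a board file declaring one hard-wired
 * device, typically from arch_initcall(); all values are hypothetical.
 */
static struct spi_board_info foo_board_info[] __initdata = {
	{
		.modalias	= "foo-chip",
		.bus_num	= 0,
		.chip_select	= 1,
		.max_speed_hz	= 10 * 1000 * 1000,
		.mode		= SPI_MODE_3,
	},
};

static int __init foo_board_init(void)
{
	return spi_register_board_info(foo_board_info,
				       ARRAY_SIZE(foo_board_info));
}
arch_initcall(foo_board_init);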
944 /*-------------------------------------------------------------------------*/
946 /* Core methods for SPI resource management */
949 * spi_res_alloc - allocate a spi resource that is life-cycle managed
952 * @spi: the SPI device for which we allocate memory
962 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
971 INIT_LIST_HEAD(&sres->entry);
972 sres->release = release;
974 return sres->data;
978 * spi_res_free - free an SPI resource
985 WARN_ON(!list_empty(&sres->entry));
990 * spi_res_add - add a spi_res to the spi_message
991 * @message: the SPI message
998 WARN_ON(!list_empty(&sres->entry));
999 list_add_tail(&sres->entry, &message->resources);
1003 * spi_res_release - release all SPI resources for this message
1011 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1012 if (res->release)
1013 res->release(ctlr, message, res->data);
1015 list_del(&res->entry);
1021 /*-------------------------------------------------------------------------*/
1022 #define spi_for_each_valid_cs(spi, idx) \
1023 for (idx = 0; idx < spi->num_chipselect; idx++) \
1024 if (!(spi->cs_index_mask & BIT(idx))) {} else
1026 static inline bool spi_is_last_cs(struct spi_device *spi)
1031 spi_for_each_valid_cs(spi, idx) {
1032 if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1038 static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
1042 * thus the SPISerialBus() resource defines it on the per-chip
1050 if (is_acpi_device_node(dev_fwnode(&spi->dev)))
1051 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
1054 gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
1057 spi_delay_exec(&spi->cs_setup, NULL);
1059 spi_delay_exec(&spi->cs_inactive, NULL);
1062 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1071 if (!force && (enable == spi_is_last_cs(spi)) &&
1072 (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
1073 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1076 trace_spi_set_cs(spi, activate);
1078 spi->controller->last_cs_index_mask = spi->cs_index_mask;
1080 if (enable && idx < spi->num_chipselect)
1081 spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
1083 spi->controller->last_cs[idx] = SPI_INVALID_CS;
1086 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1087 if (spi->controller->last_cs_mode_high)
1094 if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1095 spi_delay_exec(&spi->cs_hold, NULL);
1097 if (spi_is_csgpiod(spi)) {
1098 if (!(spi->mode & SPI_NO_CS)) {
1099 spi_for_each_valid_cs(spi, idx) {
1100 if (spi_get_csgpiod(spi, idx))
1101 spi_toggle_csgpiod(spi, idx, enable, activate);
1104 /* Some SPI controllers need both GPIO CS & ->set_cs() */
1105 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1106 spi->controller->set_cs)
1107 spi->controller->set_cs(spi, !enable);
1108 } else if (spi->controller->set_cs) {
1109 spi->controller->set_cs(spi, !enable);
1112 if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1114 spi_delay_exec(&spi->cs_setup, NULL);
1116 spi_delay_exec(&spi->cs_inactive, NULL);
1146 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1149 return -EINVAL;
1156 sg = &sgt->sgl[0];
1167 PAGE_SIZE - offset_in_page(buf)));
1174 return -ENOMEM;
1185 len -= min;
1212 sgt->orig_nents = 0;
1213 sgt->nents = 0;
1228 if (!ctlr->can_dma)
1231 if (ctlr->dma_tx)
1232 tx_dev = ctlr->dma_tx->device->dev;
1233 else if (ctlr->dma_map_dev)
1234 tx_dev = ctlr->dma_map_dev;
1236 tx_dev = ctlr->dev.parent;
1238 if (ctlr->dma_rx)
1239 rx_dev = ctlr->dma_rx->device->dev;
1240 else if (ctlr->dma_map_dev)
1241 rx_dev = ctlr->dma_map_dev;
1243 rx_dev = ctlr->dev.parent;
1245 ret = -ENOMSG;
1246 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1250 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1253 if (xfer->tx_buf != NULL) {
1254 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1255 (void *)xfer->tx_buf,
1256 xfer->len, DMA_TO_DEVICE,
1261 xfer->tx_sg_mapped = true;
1264 if (xfer->rx_buf != NULL) {
1265 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1266 xfer->rx_buf, xfer->len,
1270 &xfer->tx_sg, DMA_TO_DEVICE,
1276 xfer->rx_sg_mapped = true;
1283 ctlr->cur_rx_dma_dev = rx_dev;
1284 ctlr->cur_tx_dma_dev = tx_dev;
1291 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1292 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1295 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1299 if (xfer->rx_sg_mapped)
1300 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1302 xfer->rx_sg_mapped = false;
1304 if (xfer->tx_sg_mapped)
1305 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1307 xfer->tx_sg_mapped = false;
1316 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1317 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1319 if (xfer->tx_sg_mapped)
1320 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1321 if (xfer->rx_sg_mapped)
1322 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1328 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1329 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1331 if (xfer->rx_sg_mapped)
1332 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1333 if (xfer->tx_sg_mapped)
1334 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1365 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1370 if (xfer->tx_buf == ctlr->dummy_tx)
1371 xfer->tx_buf = NULL;
1372 if (xfer->rx_buf == ctlr->dummy_rx)
1373 xfer->rx_buf = NULL;
1385 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1386 && !(msg->spi->mode & SPI_3WIRE)) {
1390 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1391 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1392 !xfer->tx_buf)
1393 max_tx = max(xfer->len, max_tx);
1394 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1395 !xfer->rx_buf)
1396 max_rx = max(xfer->len, max_rx);
1400 tmp = krealloc(ctlr->dummy_tx, max_tx,
1403 return -ENOMEM;
1404 ctlr->dummy_tx = tmp;
1408 tmp = krealloc(ctlr->dummy_rx, max_rx,
1411 return -ENOMEM;
1412 ctlr->dummy_rx = tmp;
1416 list_for_each_entry(xfer, &msg->transfers,
1418 if (!xfer->len)
1420 if (!xfer->tx_buf)
1421 xfer->tx_buf = ctlr->dummy_tx;
1422 if (!xfer->rx_buf)
1423 xfer->rx_buf = ctlr->dummy_rx;
1435 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1436 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1437 u32 speed_hz = xfer->speed_hz;
1441 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1442 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1443 return -EINTR;
1450 * For each byte we wait for 8 cycles of the SPI clock.
1455 ms = 8LL * MSEC_PER_SEC * xfer->len;
1466 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1472 dev_err(&msg->spi->dev,
1473 "SPI transfer timed out\n");
1474 return -ETIMEDOUT;
1477 if (xfer->error & SPI_TRANS_FAIL_IO)
1478 return -EIO;
1499 u32 delay = _delay->value;
1500 u32 unit = _delay->unit;
1516 return -EINVAL;
1521 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1523 return -EINVAL;
1529 return -EINVAL;
1543 return -EINVAL;
1559 u32 delay = xfer->cs_change_delay.value;
1560 u32 unit = xfer->cs_change_delay.unit;
1563 /* Return early on "fast" mode - for everything but USECS */
1570 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1572 dev_err_once(&msg->spi->dev,
1587 * spi_transfer_one_message - Default implementation of transfer_one_message()
1599 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1600 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1602 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1603 spi_set_cs(msg->spi, !xfer->cs_off, false);
1608 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1614 if (!ctlr->ptp_sts_supported) {
1615 xfer->ptp_sts_word_pre = 0;
1616 ptp_read_system_prets(xfer->ptp_sts);
1619 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1620 reinit_completion(&ctlr->xfer_completion);
1624 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1628 if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1629 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1631 ctlr->fallback = true;
1632 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1640 dev_err(&msg->spi->dev,
1641 "SPI transfer failed: %d\n", ret);
1648 msg->status = ret;
1653 if (xfer->len)
1654 dev_err(&msg->spi->dev,
1656 xfer->len);
1659 if (!ctlr->ptp_sts_supported) {
1660 ptp_read_system_postts(xfer->ptp_sts);
1661 xfer->ptp_sts_word_post = xfer->len;
1666 if (msg->status != -EINPROGRESS)
1671 if (xfer->cs_change) {
1672 if (list_is_last(&xfer->transfer_list,
1673 &msg->transfers)) {
1676 if (!xfer->cs_off)
1677 spi_set_cs(msg->spi, false, false);
1679 if (!list_next_entry(xfer, transfer_list)->cs_off)
1680 spi_set_cs(msg->spi, true, false);
1682 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1683 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1684 spi_set_cs(msg->spi, xfer->cs_off, false);
1687 msg->actual_length += xfer->len;
1692 spi_set_cs(msg->spi, false, false);
1694 if (msg->status == -EINPROGRESS)
1695 msg->status = ret;
1697 if (msg->status && ctlr->handle_err)
1698 ctlr->handle_err(ctlr, msg);
1706 * spi_finalize_current_transfer - report completion of a transfer
1709 * Called by SPI drivers using the core transfer_one_message()
1715 complete(&ctlr->xfer_completion);
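/*
 * Example (illustrative sketch): how a controller driver pairs the core
 * transfer_one() path with spi_finalize_current_transfer(); starting the
 * hardware and the IRQ plumbing are hypothetical.
 */
static int foo_transfer_one(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* ... program FIFO/DMA and kick off the transfer ... */
	return 1;	/* positive: transfer still in flight */
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct spi_controller *ctlr = data;

	/* ... on "transfer done" hardware status ... */
	spi_finalize_current_transfer(ctlr);
	return IRQ_HANDLED;
}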
1721 if (ctlr->auto_runtime_pm) {
1722 pm_runtime_put_autosuspend(ctlr->dev.parent);
1732 if (!was_busy && ctlr->auto_runtime_pm) {
1733 ret = pm_runtime_get_sync(ctlr->dev.parent);
1735 pm_runtime_put_noidle(ctlr->dev.parent);
1736 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1739 msg->status = ret;
1749 if (!was_busy && ctlr->prepare_transfer_hardware) {
1750 ret = ctlr->prepare_transfer_hardware(ctlr);
1752 dev_err(&ctlr->dev,
1756 if (ctlr->auto_runtime_pm)
1757 pm_runtime_put(ctlr->dev.parent);
1759 msg->status = ret;
1768 if (ctlr->prepare_message) {
1769 ret = ctlr->prepare_message(ctlr, msg);
1771 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1773 msg->status = ret;
1777 msg->prepared = true;
1782 msg->status = ret;
1787 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1788 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1789 xfer->ptp_sts_word_pre = 0;
1790 ptp_read_system_prets(xfer->ptp_sts);
1800 * ctlr->cur_msg.
1807 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1808 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1809 reinit_completion(&ctlr->cur_msg_completion);
1812 ret = ctlr->transfer_one_message(ctlr, msg);
1814 dev_err(&ctlr->dev,
1819 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1821 if (READ_ONCE(ctlr->cur_msg_incomplete))
1822 wait_for_completion(&ctlr->cur_msg_completion);
1828 * __spi_pump_messages - function which processes SPI message queue
1832 * This function checks if there is any SPI message in the queue that
1848 mutex_lock(&ctlr->io_mutex);
1851 spin_lock_irqsave(&ctlr->queue_lock, flags);
1854 if (ctlr->cur_msg)
1858 if (list_empty(&ctlr->queue) || !ctlr->running) {
1859 if (!ctlr->busy)
1862 /* Defer any non-atomic teardown to the thread */
1864 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1865 !ctlr->unprepare_transfer_hardware) {
1867 ctlr->busy = false;
1868 ctlr->queue_empty = true;
1871 kthread_queue_work(ctlr->kworker,
1872 &ctlr->pump_messages);
1877 ctlr->busy = false;
1878 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1880 kfree(ctlr->dummy_rx);
1881 ctlr->dummy_rx = NULL;
1882 kfree(ctlr->dummy_tx);
1883 ctlr->dummy_tx = NULL;
1884 if (ctlr->unprepare_transfer_hardware &&
1885 ctlr->unprepare_transfer_hardware(ctlr))
1886 dev_err(&ctlr->dev,
1891 spin_lock_irqsave(&ctlr->queue_lock, flags);
1892 ctlr->queue_empty = true;
1897 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1898 ctlr->cur_msg = msg;
1900 list_del_init(&msg->queue);
1901 if (ctlr->busy)
1904 ctlr->busy = true;
1905 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1908 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1910 ctlr->cur_msg = NULL;
1911 ctlr->fallback = false;
1913 mutex_unlock(&ctlr->io_mutex);
1921 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1922 mutex_unlock(&ctlr->io_mutex);
1926 * spi_pump_messages - kthread work function which processes spi message queue
1938 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1950 * for the requested byte from the SPI transfer. The frequency with which this
1954 * timestamp is only taken once, at the first such call. It is assumed that
1961 if (!xfer->ptp_sts)
1964 if (xfer->timestamped)
1967 if (progress > xfer->ptp_sts_word_pre)
1971 xfer->ptp_sts_word_pre = progress;
1974 local_irq_save(ctlr->irq_flags);
1978 ptp_read_system_prets(xfer->ptp_sts);
1983 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1987 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1990 * the requested byte from the SPI transfer. Can be called with an arbitrary
1991 * frequency: only the first call where @tx exceeds or is equal to the
1998 if (!xfer->ptp_sts)
2001 if (xfer->timestamped)
2004 if (progress < xfer->ptp_sts_word_post)
2007 ptp_read_system_postts(xfer->ptp_sts);
2010 local_irq_restore(ctlr->irq_flags);
2015 xfer->ptp_sts_word_post = progress;
2017 xfer->timestamped = 1;
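/*
 * Example (illustrative sketch): a PIO TX loop bracketing each word with the
 * two timestamping helpers above; the FIFO register is hypothetical.
 */
static void foo_tx_words(struct spi_controller *ctlr,
			 struct spi_transfer *xfer,
			 void __iomem *fifo, bool irqs_off)
{
	const u8 *buf = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
		writeb(buf[i], fifo);
		spi_take_timestamp_post(ctlr, xfer, i + 1, irqs_off);
	}
}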
2022 * spi_set_thread_rt - set the controller to pump at realtime priority
2026 * (by setting the ->rt value before calling spi_register_controller()) or
2038 dev_info(&ctlr->dev,
2040 sched_set_fifo(ctlr->kworker->task);
2045 ctlr->running = false;
2046 ctlr->busy = false;
2047 ctlr->queue_empty = true;
2049 ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2050 if (IS_ERR(ctlr->kworker)) {
2051 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2052 return PTR_ERR(ctlr->kworker);
2055 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2064 if (ctlr->rt)
2071 * spi_get_next_queued_message() - called by driver to check for queued
2086 spin_lock_irqsave(&ctlr->queue_lock, flags);
2087 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2089 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2096 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2108 struct spi_controller *ctlr = msg->spi->controller;
2110 if (ctlr->unoptimize_message)
2111 ctlr->unoptimize_message(msg);
2115 msg->optimized = false;
2116 msg->opt_state = NULL;
2120 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2128 if (!msg->pre_optimized && msg->optimized &&
2129 !msg->spi->controller->defer_optimize_message)
2134 * spi_finalize_current_message() - the current message is complete
2146 mesg = ctlr->cur_msg;
2148 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2149 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2150 ptp_read_system_postts(xfer->ptp_sts);
2151 xfer->ptp_sts_word_post = xfer->len;
2155 if (unlikely(ctlr->ptp_sts_supported))
2156 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2157 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2161 if (mesg->prepared && ctlr->unprepare_message) {
2162 ret = ctlr->unprepare_message(ctlr, mesg);
2164 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2169 mesg->prepared = false;
2173 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2175 if (READ_ONCE(ctlr->cur_msg_need_completion))
2176 complete(&ctlr->cur_msg_completion);
2180 mesg->state = NULL;
2181 if (mesg->complete)
2182 mesg->complete(mesg->context);
2190 spin_lock_irqsave(&ctlr->queue_lock, flags);
2192 if (ctlr->running || ctlr->busy) {
2193 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2194 return -EBUSY;
2197 ctlr->running = true;
2198 ctlr->cur_msg = NULL;
2199 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2201 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2213 * A wait_queue on the ctlr->busy could be used, but then the common
2215 * friends on every SPI message. Do this instead.
2218 spin_lock_irqsave(&ctlr->queue_lock, flags);
2219 if (list_empty(&ctlr->queue) && !ctlr->busy) {
2220 ctlr->running = false;
2221 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2224 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2226 } while (--limit);
2228 return -EBUSY;
2244 dev_err(&ctlr->dev, "problem destroying queue\n");
2248 kthread_destroy_worker(ctlr->kworker);
2253 static int __spi_queued_transfer(struct spi_device *spi,
2257 struct spi_controller *ctlr = spi->controller;
2260 spin_lock_irqsave(&ctlr->queue_lock, flags);
2262 if (!ctlr->running) {
2263 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2264 return -ESHUTDOWN;
2266 msg->actual_length = 0;
2267 msg->status = -EINPROGRESS;
2269 list_add_tail(&msg->queue, &ctlr->queue);
2270 ctlr->queue_empty = false;
2271 if (!ctlr->busy && need_pump)
2272 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2274 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2279 * spi_queued_transfer - transfer function for queued transfers
2280 * @spi: SPI device which is requesting transfer
2281  * @msg: SPI message to be handled; it is queued onto the driver queue
2285 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2287 return __spi_queued_transfer(spi, msg, true);
2294 ctlr->transfer = spi_queued_transfer;
2295 if (!ctlr->transfer_one_message)
2296 ctlr->transfer_one_message = spi_transfer_one_message;
2301 dev_err(&ctlr->dev, "problem initializing queue\n");
2304 ctlr->queued = true;
2307 dev_err(&ctlr->dev, "problem starting queue\n");
2320 * spi_flush_queue - Send all pending messages in the queue from the callers'
2325 * sent before doing something. Is used by the spi-mem code to make sure SPI
2326 * memory operations do not preempt regular SPI transfers that have been queued
2327 * before the spi-mem operation.
2331 if (ctlr->transfer == spi_queued_transfer)
2335 /*-------------------------------------------------------------------------*/
2345 delay->value = DIV_ROUND_UP(value, 1000);
2346 delay->unit = SPI_DELAY_UNIT_USECS;
2348 delay->value = value;
2349 delay->unit = SPI_DELAY_UNIT_NSECS;
2354 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2361 if (of_property_read_bool(nc, "spi-cpha"))
2362 spi->mode |= SPI_CPHA;
2363 if (of_property_read_bool(nc, "spi-cpol"))
2364 spi->mode |= SPI_CPOL;
2365 if (of_property_read_bool(nc, "spi-3wire"))
2366 spi->mode |= SPI_3WIRE;
2367 if (of_property_read_bool(nc, "spi-lsb-first"))
2368 spi->mode |= SPI_LSB_FIRST;
2369 if (of_property_read_bool(nc, "spi-cs-high"))
2370 spi->mode |= SPI_CS_HIGH;
2373 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2376 spi->mode |= SPI_NO_TX;
2381 spi->mode |= SPI_TX_DUAL;
2384 spi->mode |= SPI_TX_QUAD;
2387 spi->mode |= SPI_TX_OCTAL;
2390 dev_warn(&ctlr->dev,
2391 "spi-tx-bus-width %d not supported\n",
2397 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2400 spi->mode |= SPI_NO_RX;
2405 spi->mode |= SPI_RX_DUAL;
2408 spi->mode |= SPI_RX_QUAD;
2411 spi->mode |= SPI_RX_OCTAL;
2414 dev_warn(&ctlr->dev,
2415 "spi-rx-bus-width %d not supported\n",
2423 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2425 return -EINVAL;
2434 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2439 if ((of_property_present(nc, "parallel-memories")) &&
2440 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2441 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2442 return -EINVAL;
2445 spi->num_chipselect = rc;
2447 spi_set_chipselect(spi, idx, cs[idx]);
2450 * By default spi->chip_select[0] will hold the physical CS number,
2451 * so set bit 0 in spi->cs_index_mask.
2453 spi->cs_index_mask = BIT(0);
2456 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2457 spi->max_speed_hz = value;
2460 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2461 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2462 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
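/*
 * Example (illustrative sketch): a device-tree node carrying the properties
 * parsed above, including spi-lsb-first; names and values are hypothetical.
 *
 *	&spi0 {
 *		sensor@0 {
 *			compatible = "vendor,foo-chip";
 *			reg = <0>;
 *			spi-max-frequency = <5000000>;
 *			spi-cpha;
 *			spi-lsb-first;
 *			spi-cs-setup-delay-ns = <100>;
 *		};
 *	};
 */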
2470 struct spi_device *spi;
2474 spi = spi_alloc_device(ctlr);
2475 if (!spi) {
2476 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2477 rc = -ENOMEM;
2482 rc = of_alias_from_compatible(nc, spi->modalias,
2483 sizeof(spi->modalias));
2485 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2489 rc = of_spi_parse_dt(ctlr, spi, nc);
2496 device_set_node(&spi->dev, of_fwnode_handle(nc));
2499 rc = spi_add_device(spi);
2501 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2505 return spi;
2510 spi_dev_put(spi);
2515 * of_register_spi_devices() - Register child devices onto the SPI bus
2519 * represents a valid SPI target device.
2523 struct spi_device *spi;
2526 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2529 spi = of_register_spi_device(ctlr, nc);
2530 if (IS_ERR(spi)) {
2531 dev_warn(&ctlr->dev,
2532 "Failed to create SPI device for %pOF\n", nc);
2542 * spi_new_ancillary_device() - Register ancillary SPI device
2543 * @spi: Pointer to the main SPI device registering the ancillary device
2546 * Register an ancillary SPI device; for example some chips have a chip-select
2549  * This may only be called from the main SPI device's probe routine.
2553 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2556 struct spi_controller *ctlr = spi->controller;
2563 rc = -ENOMEM;
2567 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2569 /* Use provided chip-select for ancillary device */
2572 /* Take over SPI mode/speed from SPI main device */
2573 ancillary->max_speed_hz = spi->max_speed_hz;
2574 ancillary->mode = spi->mode;
2576 * By default spi->chip_select[0] will hold the physical CS number,
2577 * so set bit 0 in spi->cs_index_mask.
2579 ancillary->cs_index_mask = BIT(0);
2581 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2586 dev_err(&spi->dev, "failed to register ancillary device\n");
2615 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2618 sb = &ares->data.spi_serial_bus;
2619 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2628 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2631 * Return: the number of SpiSerialBus resources in the ACPI-device's
2632 * resource-list; or a negative error code.
2659 && obj->buffer.length >= 4)
2660 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2663 && obj->buffer.length == 8)
2664 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2667 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2668 lookup->mode |= SPI_LSB_FIRST;
2671 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2672 lookup->mode |= SPI_CPOL;
2675 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2676 lookup->mode |= SPI_CPHA;
2682 struct spi_controller *ctlr = lookup->ctlr;
2684 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2689 sb = &ares->data.spi_serial_bus;
2690 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2692 if (lookup->index != -1 && lookup->n++ != lookup->index)
2696 sb->resource_source.string_ptr,
2700 return -ENODEV;
2703 if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2704 return -ENODEV;
2710 return -ENODEV;
2714 return -EPROBE_DEFER;
2716 lookup->ctlr = ctlr;
2723 * 0 .. max - 1 so we need to ask the driver to
2726 if (ctlr->fw_translate_cs) {
2727 int cs = ctlr->fw_translate_cs(ctlr,
2728 sb->device_selection);
2731 lookup->chip_select = cs;
2733 lookup->chip_select = sb->device_selection;
2736 lookup->max_speed_hz = sb->connection_speed;
2737 lookup->bits_per_word = sb->data_bit_length;
2739 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2740 lookup->mode |= SPI_CPHA;
2741 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2742 lookup->mode |= SPI_CPOL;
2743 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2744 lookup->mode |= SPI_CS_HIGH;
2746 } else if (lookup->irq < 0) {
2750 lookup->irq = r.start;
2758 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2759 * @ctlr: controller to which the spi device belongs
2760 * @adev: ACPI Device for the spi device
2761 * @index: Index of the spi resource inside the ACPI Node
2763  * This should be used to allocate a new SPI device from an ACPI Device node.
2764 * The caller is responsible for calling spi_add_device to register the SPI device.
2766 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2768 * If index is set to -1, index is not used.
2769 * Note: If index is -1, ctlr must be set.
2780 struct spi_device *spi;
2783 if (!ctlr && index == -1)
2784 return ERR_PTR(-EINVAL);
2787 lookup.irq = -1;
2797 /* Found SPI in _CRS but it points to another controller */
2801 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2802 device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2803 /* Apple does not use _CRS but nested devices for SPI target devices */
2808 return ERR_PTR(-ENODEV);
2810 spi = spi_alloc_device(lookup.ctlr);
2811 if (!spi) {
2812 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2813 dev_name(&adev->dev));
2814 return ERR_PTR(-ENOMEM);
2817 spi_set_chipselect(spi, 0, lookup.chip_select);
2819 ACPI_COMPANION_SET(&spi->dev, adev);
2820 spi->max_speed_hz = lookup.max_speed_hz;
2821 spi->mode |= lookup.mode;
2822 spi->irq = lookup.irq;
2823 spi->bits_per_word = lookup.bits_per_word;
2825 * By default spi->chip_select[0] will hold the physical CS number,
2826 * so set bit 0 in spi->cs_index_mask.
2828 spi->cs_index_mask = BIT(0);
2830 return spi;
2837 struct spi_device *spi;
2839 if (acpi_bus_get_status(adev) || !adev->status.present ||
2843 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2844 if (IS_ERR(spi)) {
2845 if (PTR_ERR(spi) == -ENOMEM)
2851 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2852 sizeof(spi->modalias));
2855 * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
2863 if (spi->irq < 0)
2864 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2868 adev->power.flags.ignore_parent = true;
2869 if (spi_add_device(spi)) {
2870 adev->power.flags.ignore_parent = false;
2871 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2872 dev_name(&adev->dev));
2873 spi_dev_put(spi);
2898 handle = ACPI_HANDLE(ctlr->dev.parent);
2906 dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
2928 * spi_target_abort - abort the ongoing transfer request on an SPI target controller
2929 * @spi: device used for the current transfer
2931 int spi_target_abort(struct spi_device *spi)
2933 struct spi_controller *ctlr = spi->controller;
2935 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2936 return ctlr->target_abort(ctlr);
2938 return -ENOTSUPP;
2950 child = device_find_any_child(&ctlr->dev);
2951 ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2962 struct spi_device *spi;
2969 return -EINVAL;
2971 child = device_find_any_child(&ctlr->dev);
2980 spi = spi_alloc_device(ctlr);
2981 if (!spi)
2982 return -ENOMEM;
2984 strscpy(spi->modalias, name, sizeof(spi->modalias));
2986 rc = spi_add_device(spi);
2988 spi_dev_put(spi);
3023 * __spi_alloc_controller - allocate an SPI host or target controller
3025 * @size: how much zeroed driver-private data to allocate; the pointer to this
3030 * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
3034 * This call is used only by SPI controller drivers, which are the
3045 * Return: the SPI controller structure on success, else NULL.
3060 device_initialize(&ctlr->dev);
3061 INIT_LIST_HEAD(&ctlr->queue);
3062 spin_lock_init(&ctlr->queue_lock);
3063 spin_lock_init(&ctlr->bus_lock_spinlock);
3064 mutex_init(&ctlr->bus_lock_mutex);
3065 mutex_init(&ctlr->io_mutex);
3066 mutex_init(&ctlr->add_lock);
3067 ctlr->bus_num = -1;
3068 ctlr->num_chipselect = 1;
3069 ctlr->target = target;
3071 ctlr->dev.class = &spi_target_class;
3073 ctlr->dev.class = &spi_controller_class;
3074 ctlr->dev.parent = dev;
3075 pm_suspend_ignore_children(&ctlr->dev, true);
3088 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3089 * @dev: physical device of SPI controller
3090 * @size: how much zeroed driver-private data to allocate
3091 * @target: whether to allocate an SPI host (false) or SPI target (true) controller
3094 * Allocate an SPI controller and automatically release a reference on it
3100 * Return: the SPI controller structure on success, else NULL.
3115 ctlr->devm_allocated = true;
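/*
 * Example (illustrative sketch): the usual probe pattern built on the devm
 * allocator above; "foo" names are hypothetical and foo_transfer_one() is
 * the kind of handler sketched near spi_finalize_current_transfer().
 */
struct foo_priv {
	void __iomem *base;
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
	if (!ctlr)
		return -ENOMEM;

	ctlr->num_chipselect = 4;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->transfer_one = foo_transfer_one;
	ctlr->dev.of_node = pdev->dev.of_node;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}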
3127 * spi_get_gpio_descs() - grab chip select GPIOs for the controller
3128 * @ctlr: The SPI controller to grab GPIO descriptors for
3134 struct device *dev = &ctlr->dev;
3141 if (nb == -ENOENT)
3146 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3148 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3151 return -ENOMEM;
3152 ctlr->cs_gpiods = cs;
3177 return -ENOMEM;
3183 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3185 return -EINVAL;
3190 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3192 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3193 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3195 return -EINVAL;
3204 * The controller may implement only the high-level SPI-memory like
3205 * operations if it does not support regular SPI transfers, and this is
3207 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3208 * one of the ->transfer_xxx() method be implemented.
3210 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3211 if (!ctlr->transfer && !ctlr->transfer_one &&
3212 !ctlr->transfer_one_message) {
3213 return -EINVAL;
3229 return id == -ENOSPC ? -EBUSY : id;
3230 ctlr->bus_num = id;
3235 * spi_register_controller - register SPI host or target controller
3240 * SPI controllers connect to their drivers using some non-SPI bus,
3242 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3244 * SPI controllers use board specific (often SOC specific) bus numbers,
3245 * and board-specific addressing for SPI devices combines those numbers
3246 * with chip select numbers. Since SPI does not directly support dynamic
3259 struct device *dev = ctlr->dev.parent;
3266 return -ENODEV;
3270 * the SPI controller.
3276 if (ctlr->bus_num < 0)
3277 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3278 if (ctlr->bus_num >= 0) {
3279 /* Devices with a fixed bus num must check-in with the num */
3280 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3284 if (ctlr->bus_num < 0) {
3285 first_dynamic = of_alias_get_highest_id("spi");
3295 ctlr->bus_lock_flag = 0;
3296 init_completion(&ctlr->xfer_completion);
3297 init_completion(&ctlr->cur_msg_completion);
3298 if (!ctlr->max_dma_len)
3299 ctlr->max_dma_len = INT_MAX;
3305 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3307 if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3315 ctlr->mode_bits |= SPI_CS_HIGH;
3319 * Even if it's just one always-selected device, there must
3322 if (!ctlr->num_chipselect) {
3323 status = -EINVAL;
3329 ctlr->last_cs[idx] = SPI_INVALID_CS;
3331 status = device_add(&ctlr->dev);
3336 dev_name(&ctlr->dev));
3340 * need the queueing logic if the driver is only supporting high-level
3343 if (ctlr->transfer) {
3345 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3348 device_del(&ctlr->dev);
3353 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3354 if (!ctlr->pcpu_statistics) {
3355 dev_err(dev, "Error allocating per-cpu statistics\n");
3356 status = -ENOMEM;
3361 list_add_tail(&ctlr->list, &spi_controller_list);
3363 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3375 idr_remove(&spi_controller_idr, ctlr->bus_num);
3387 * devm_spi_register_controller - register managed SPI host or target controller
3388 * @dev: device managing SPI controller
3393 * Register a SPI device as with spi_register_controller() which will
3406 return -ENOMEM;
3427 * spi_unregister_controller - unregister SPI host or target controller
3431 * This call is used only by SPI controller drivers, which are the
3441 int id = ctlr->bus_num;
3445 mutex_lock(&ctlr->add_lock);
3447 device_for_each_child(&ctlr->dev, NULL, __unregister);
3449 /* First make sure that this controller was ever added */
3453 if (ctlr->queued) {
3455 dev_err(&ctlr->dev, "queue remove failed\n");
3458 list_del(&ctlr->list);
3461 device_del(&ctlr->dev);
3470 mutex_unlock(&ctlr->add_lock);
3476 if (!ctlr->devm_allocated)
3477 put_device(&ctlr->dev);
3483 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3488 mutex_lock(&ctlr->bus_lock_mutex);
3489 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3490 mutex_unlock(&ctlr->bus_lock_mutex);
3495 mutex_lock(&ctlr->bus_lock_mutex);
3496 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3497 mutex_unlock(&ctlr->bus_lock_mutex);
3504 /* Basically no-ops for non-queued controllers */
3505 if (ctlr->queued) {
3508 dev_err(&ctlr->dev, "queue stop failed\n");
3522 if (ctlr->queued) {
3525 dev_err(&ctlr->dev, "queue restart failed\n");
3531 /*-------------------------------------------------------------------------*/
3543 if (rxfer->release)
3544 rxfer->release(ctlr, msg, res);
3547 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3550 for (i = 0; i < rxfer->inserted; i++)
3551 list_del(&rxfer->inserted_transfers[i].transfer_list);
3555 * spi_replace_transfers - replace transfers with several transfers
3558 * @xfer_first: the first spi_transfer we want to replace
3583 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3588 return ERR_PTR(-ENOMEM);
3591 rxfer->release = release;
3595 rxfer->extradata =
3596 &rxfer->inserted_transfers[insert];
3599 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3603 * the @replaced_transfers - it may be spi_message.messages!
3605 rxfer->replaced_after = xfer_first->transfer_list.prev;
3610  * If the entry after replaced_after is msg->transfers
3614 if (rxfer->replaced_after->next == &msg->transfers) {
3615 dev_err(&msg->spi->dev,
3618 list_splice(&rxfer->replaced_transfers,
3619 rxfer->replaced_after);
3625 return ERR_PTR(-EINVAL);
3632 list_move_tail(rxfer->replaced_after->next,
3633 &rxfer->replaced_transfers);
3638 * based on the first transfer to get removed.
3642 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3648 list_add(&xfer->transfer_list, rxfer->replaced_after);
3652 xfer->cs_change = false;
3653 xfer->delay.value = 0;
3658 rxfer->inserted = insert;
3677 count = DIV_ROUND_UP(xfer->len, maxsize);
3683 xfers = srt->inserted_transfers;
3694 * The first transfer just needs the length modified, so we
3708 xfers[i].len = min(maxsize, xfers[i].len - offset);
3715 *xferp = &xfers[count - 1];
3718 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3720 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3727 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3735 * spi message unoptimize phase so this function should only be called from
3754 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3755 if (xfer->len > maxsize) {
3769 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3771 * certain number of SPI words
3777 * spi message unoptimize phase so this function should only be called from
3795 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3799 maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
3800 if (xfer->len > maxsize) {
3812 /*-------------------------------------------------------------------------*/
3815 * Core methods for SPI controller protocol drivers. Some of the
3822 if (ctlr->bits_per_word_mask) {
3825 return -EINVAL;
3826 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3827 return -EINVAL;
3834 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3835 * @spi: the device that requires specific CS timing configuration
3839 static int spi_set_cs_timing(struct spi_device *spi)
3841 struct device *parent = spi->controller->dev.parent;
3844 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3845 if (spi->controller->auto_runtime_pm) {
3849 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3854 status = spi->controller->set_cs_timing(spi);
3857 status = spi->controller->set_cs_timing(spi);
3864 * spi_setup - setup SPI mode and clock rate
3865 * @spi: the device whose settings are being modified
3868 * SPI protocol drivers may need to update the transfer mode if the
3874 * or from it. When this function returns, the SPI device is deselected.
3879 * LSB-first wire encoding, or active-high chipselects.
3883 int spi_setup(struct spi_device *spi)
3892 if ((hweight_long(spi->mode &
3894 (hweight_long(spi->mode &
3896 dev_err(&spi->dev,
3897 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3898 return -EINVAL;
3901 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3904 return -EINVAL;
3906 if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3907 dev_err(&spi->dev,
3909 return -EINVAL;
3917 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3923 dev_warn(&spi->dev,
3926 spi->mode &= ~ugly_bits;
3930 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3932 return -EINVAL;
3935 if (!spi->bits_per_word) {
3936 spi->bits_per_word = 8;
3939 * Some controllers may not support the default 8 bits-per-word
3942 status = __spi_validate_bits_per_word(spi->controller,
3943 spi->bits_per_word);
3948 if (spi->controller->max_speed_hz &&
3949 (!spi->max_speed_hz ||
3950 spi->max_speed_hz > spi->controller->max_speed_hz))
3951 spi->max_speed_hz = spi->controller->max_speed_hz;
3953 mutex_lock(&spi->controller->io_mutex);
3955 if (spi->controller->setup) {
3956 status = spi->controller->setup(spi);
3958 mutex_unlock(&spi->controller->io_mutex);
3959 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3965 status = spi_set_cs_timing(spi);
3967 mutex_unlock(&spi->controller->io_mutex);
3971 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3972 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3974 mutex_unlock(&spi->controller->io_mutex);
3975 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3983 * checking for a non-zero return value instead of a negative
3988 spi_set_cs(spi, false, true);
3989 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3991 spi_set_cs(spi, false, true);
3994 mutex_unlock(&spi->controller->io_mutex);
3996 if (spi->rt && !spi->controller->rt) {
3997 spi->controller->rt = true;
3998 spi_set_thread_rt(spi->controller);
4001 trace_spi_setup(spi, status);
4003 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4004 spi->mode & SPI_MODE_X_MASK,
4005 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4006 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4007 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
4008 (spi->mode & SPI_LOOP) ? "loopback, " : "",
4009 spi->bits_per_word, spi->max_speed_hz,
4017 struct spi_device *spi)
4021 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4025 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4030 memcpy(&xfer->word_delay, &spi->word_delay,
4031 sizeof(xfer->word_delay));
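/*
 * Example (illustrative sketch): a protocol driver reconfiguring its device
 * with spi_setup() above, typically once from probe(); the values are
 * hypothetical.
 */
static int foo_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_0 | SPI_LSB_FIRST;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 2 * 1000 * 1000;

	return spi_setup(spi);	/* validated against controller caps */
}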
4036 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4038 struct spi_controller *ctlr = spi->controller;
4042 if (list_empty(&message->transfers))
4043 return -EINVAL;
4045 message->spi = spi;
4048 * Half-duplex links include original MicroWire, and ones with
4053 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4054 (spi->mode & SPI_3WIRE)) {
4055 unsigned flags = ctlr->flags;
4057 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4058 if (xfer->rx_buf && xfer->tx_buf)
4059 return -EINVAL;
4060 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4061 return -EINVAL;
4062 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4063 return -EINVAL;
4068 * Set transfer bits_per_word and max speed as spi device default if
4075 message->frame_length = 0;
4076 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4077 xfer->effective_speed_hz = 0;
4078 message->frame_length += xfer->len;
4079 if (!xfer->bits_per_word)
4080 xfer->bits_per_word = spi->bits_per_word;
4082 if (!xfer->speed_hz)
4083 xfer->speed_hz = spi->max_speed_hz;
4085 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4086 xfer->speed_hz = ctlr->max_speed_hz;
4088 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4089 return -EINVAL;
4092 * default considered as SDR mode for SPI and QSPI controller.
4095 if (xfer->dtr_mode && !ctlr->dtr_caps)
4096 return -EINVAL;
4099  * SPI transfer length should be a multiple of the SPI word size,
4100  * where the SPI word size is a power-of-two number of bytes.
4102 if (xfer->bits_per_word <= 8)
4104 else if (xfer->bits_per_word <= 16)
4110 if (xfer->len % w_size)
4111 return -EINVAL;
4113 if (xfer->speed_hz && ctlr->min_speed_hz &&
4114 xfer->speed_hz < ctlr->min_speed_hz)
4115 return -EINVAL;
4117 if (xfer->tx_buf && !xfer->tx_nbits)
4118 xfer->tx_nbits = SPI_NBITS_SINGLE;
4119 if (xfer->rx_buf && !xfer->rx_nbits)
4120 xfer->rx_nbits = SPI_NBITS_SINGLE;
4126 if (xfer->tx_buf) {
4127 if (spi->mode & SPI_NO_TX)
4128 return -EINVAL;
4129 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4130 xfer->tx_nbits != SPI_NBITS_DUAL &&
4131 xfer->tx_nbits != SPI_NBITS_QUAD &&
4132 xfer->tx_nbits != SPI_NBITS_OCTAL)
4133 return -EINVAL;
4134 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4135 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
4136 return -EINVAL;
4137 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4138 !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
4139 return -EINVAL;
4140 if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
4141 !(spi->mode & SPI_TX_OCTAL))
4142 return -EINVAL;
4145 if (xfer->rx_buf) {
4146 if (spi->mode & SPI_NO_RX)
4147 return -EINVAL;
4148 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4149 xfer->rx_nbits != SPI_NBITS_DUAL &&
4150 xfer->rx_nbits != SPI_NBITS_QUAD &&
4151 xfer->rx_nbits != SPI_NBITS_OCTAL)
4152 return -EINVAL;
4153 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4154 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
4155 return -EINVAL;
4156 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4157 !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
4158 return -EINVAL;
4159 if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
4160 !(spi->mode & SPI_RX_OCTAL))
4161 return -EINVAL;
4164 if (_spi_xfer_word_delay_update(xfer, spi))
4165 return -EINVAL;
4168 if (xfer->offload_flags) {
4169 if (!message->offload)
4170 return -EINVAL;
4172 if (xfer->offload_flags & ~message->offload->xfer_flags)
4173 return -EINVAL;
4177 message->status = -EINPROGRESS;
4183 * spi_split_transfers - generic handling of transfer splitting
4186 * Under certain conditions, a SPI controller may not support arbitrary
4199 struct spi_controller *ctlr = msg->spi->controller;
4204 * If an SPI controller does not support toggling the CS line on each
4206 * for the CS line, we can emulate the CS-per-word hardware function by
4207 * splitting transfers into one-word transfers and ensuring that
4210 if ((msg->spi->mode & SPI_CS_WORD) &&
4211 (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4216 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4218 if (list_is_last(&xfer->transfer_list, &msg->transfers))
4221 xfer->cs_change = 1;
4225 spi_max_transfer_size(msg->spi));
4234 * __spi_optimize_message - shared implementation for spi_optimize_message()
4236 * @spi: the device that will be used for the message
4239 * Peripheral drivers will call spi_optimize_message() and the spi core will
4246 static int __spi_optimize_message(struct spi_device *spi,
4249 struct spi_controller *ctlr = spi->controller;
4252 ret = __spi_validate(spi, msg);
4260 if (ctlr->optimize_message) {
4261 ret = ctlr->optimize_message(msg);
4268 msg->optimized = true;
4274 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4275 * @spi: the device that will be used for the message
4279 static int spi_maybe_optimize_message(struct spi_device *spi,
4282 if (spi->controller->defer_optimize_message) {
4283 msg->spi = spi;
4287 if (msg->pre_optimized)
4290 return __spi_optimize_message(spi, msg);
4294 * spi_optimize_message - do any one-time validation and setup for a SPI message
4295 * @spi: the device that will be used for the message
4304 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4313 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4318 * Pre-optimization is not supported and optimization is deferred e.g.
4319 * when using spi-mux.
4321 if (spi->controller->defer_optimize_message)
4324 ret = __spi_optimize_message(spi, msg);
4334 msg->pre_optimized = true;
4341 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4350 if (msg->spi->controller->defer_optimize_message)
4354 msg->pre_optimized = false;
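/*
 * Example (illustrative sketch): pre-optimizing a message that is reused on
 * a hot path; msg is assumed to be fully built before this call. In a real
 * driver the optimize/unoptimize calls would live in probe/remove and the
 * spi_sync() would repeat in between.
 */
static int foo_run(struct spi_device *spi, struct spi_message *msg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);	/* once, e.g. at probe */
	if (ret)
		return ret;

	ret = spi_sync(spi, msg);		/* repeated on the hot path */

	spi_unoptimize_message(msg);		/* on teardown */
	return ret;
}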
4358 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4360 struct spi_controller *ctlr = spi->controller;
4364 * Some controllers do not support doing regular SPI transfers. Return
4367 if (!ctlr->transfer)
4368 return -ENOTSUPP;
4370 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4371 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4375 if (!ctlr->ptp_sts_supported) {
4376 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4377 xfer->ptp_sts_word_pre = 0;
4378 ptp_read_system_prets(xfer->ptp_sts);
4382 return ctlr->transfer(spi, message);
4391 * devm_spi_optimize_message - managed version of spi_optimize_message()
4392 * @dev: the device that manages @msg (usually @spi->dev)
4393 * @spi: the device that will be used for the message
4400 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4405 ret = spi_optimize_message(spi, msg);
4414 * spi_async - asynchronous SPI transfer
4415 * @spi: device with which data will be exchanged
4423 * Before that invocation, the value of message->status is undefined.
4424 * When the callback is issued, message->status holds either zero (to
4427 * deallocate the associated memory; it's no longer in use by any SPI
4444 int spi_async(struct spi_device *spi, struct spi_message *message)
4446 struct spi_controller *ctlr = spi->controller;
4450 ret = spi_maybe_optimize_message(spi, message);
4454 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4456 if (ctlr->bus_lock_flag)
4457 ret = -EBUSY;
4459 ret = __spi_async(spi, message);
4461 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
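/*
 * Example (illustrative sketch): an asynchronous submission; the completion
 * callback may run in IRQ context, so it only signals a waiter here.
 */
static void foo_msg_done(void *context)
{
	complete(context);
}

static int foo_send_async(struct spi_device *spi, struct spi_message *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	msg->complete = foo_msg_done;
	msg->context = &done;

	ret = spi_async(spi, msg);
	if (ret)
		return ret;

	wait_for_completion(&done);
	return msg->status;
}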
4472 mutex_lock(&ctlr->io_mutex);
4474 was_busy = ctlr->busy;
4476 ctlr->cur_msg = msg;
4479 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4480 ctlr->cur_msg = NULL;
4481 ctlr->fallback = false;
4484 kfree(ctlr->dummy_rx);
4485 ctlr->dummy_rx = NULL;
4486 kfree(ctlr->dummy_tx);
4487 ctlr->dummy_tx = NULL;
4488 if (ctlr->unprepare_transfer_hardware &&
4489 ctlr->unprepare_transfer_hardware(ctlr))
4490 dev_err(&ctlr->dev,
4495 mutex_unlock(&ctlr->io_mutex);
4498 /*-------------------------------------------------------------------------*/
4501 * Utility methods for SPI protocol drivers, layered on
4511 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4516 struct spi_controller *ctlr = spi->controller;
4519  dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4520 return -ESHUTDOWN;
4523 status = spi_maybe_optimize_message(spi, message);
4527 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4528 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4536 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4537 message->actual_length = 0;
4538 message->status = -EINPROGRESS;
4542 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4543 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4547 return message->status;
4556 message->complete = spi_complete;
4557 message->context = &done;
4559 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4560 status = __spi_async(spi, message);
4561 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4565 status = message->status;
4567 message->complete = NULL;
4568 message->context = NULL;
4574 * spi_sync - blocking/synchronous SPI data transfers
4575 * @spi: device with which data will be exchanged
4580 * is non-interruptible, and has no timeout. Low-overhead controller
4583 * Note that the SPI device's chip select is active during the message,
4585 * frequently-used devices may want to minimize costs of selecting a chip,
4594 int spi_sync(struct spi_device *spi, struct spi_message *message)
4598 mutex_lock(&spi->controller->bus_lock_mutex);
4599 ret = __spi_sync(spi, message);
4600 mutex_unlock(&spi->controller->bus_lock_mutex);
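/*
 * Example (illustrative sketch): one full-duplex transfer through the
 * synchronous API; tx/rx buffers must stay valid until spi_sync() returns.
 */
static int foo_xchg(struct spi_device *spi, const void *tx, void *rx,
		    size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);
	return spi_sync(spi, &msg);
}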
4607 * spi_sync_locked - version of spi_sync with exclusive bus usage
4608 * @spi: device with which data will be exchanged
4613 * is non-interruptible, and has no timeout. Low-overhead controller
4617 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4622 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4624 return __spi_sync(spi, message);
4629 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4630 * @ctlr: SPI bus controller that should be locked for exclusive bus access
4634 * is non-interruptible, and has no timeout.
4637 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4639 * and spi_async_locked calls when the SPI bus lock is held.
4647 mutex_lock(&ctlr->bus_lock_mutex);
4649 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4650 ctlr->bus_lock_flag = 1;
4651 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4660 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4661 * @ctlr: SPI bus controller that was locked for exclusive bus access
4665 * is non-interruptible, and has no timeout.
4667 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4674 ctlr->bus_lock_flag = 0;
4676 mutex_unlock(&ctlr->bus_lock_mutex);
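/*
 * Example (illustrative sketch): grouping two messages that must not be
 * interleaved with traffic from other devices on the same bus.
 */
static int foo_atomic_pair(struct spi_device *spi, struct spi_message *m1,
			   struct spi_message *m2)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);
	ret = spi_sync_locked(spi, m1);
	if (!ret)
		ret = spi_sync_locked(spi, m2);
	spi_bus_unlock(ctlr);

	return ret;
}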
4688 * spi_write_then_read - SPI synchronous write followed by read
4689 * @spi: device with which data will be exchanged
4690 * @txbuf: data to be written (need not be DMA-safe)
4692 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4702 * Performance-sensitive or bulk transfer code should instead use
4703 * spi_{async,sync}() calls with DMA-safe buffers.
4707 int spi_write_then_read(struct spi_device *spi,
4719 * Use preallocated DMA-safe buffer if we can. We can't avoid
4722 * using the pre-allocated buffer or the transfer is too large.
4728 return -ENOMEM;
4749 status = spi_sync(spi, &message);
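/*
 * Example (illustrative sketch): reading one register with the helper;
 * stack buffers are fine because the helper copies through its own DMA-safe
 * buffer. The 0x80 "read" bit is a hypothetical device convention.
 */
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}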
4762 /*-------------------------------------------------------------------------*/
4773 /* The SPI controllers are not on spi_bus, so we find them another way */
4793 struct spi_device *spi;
4797 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4801 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4802 put_device(&ctlr->dev);
4810 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4811 spi = of_register_spi_device(ctlr, rd->dn);
4812 put_device(&ctlr->dev);
4814 if (IS_ERR(spi)) {
4816 __func__, rd->dn);
4817 of_node_clear_flag(rd->dn, OF_POPULATED);
4818 return notifier_from_errno(PTR_ERR(spi));
4824 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4828 spi = of_find_spi_device_by_node(rd->dn);
4829 if (spi == NULL)
4833 spi_unregister_device(spi);
4836 put_device(&spi->dev);
4853 return device_match_acpi_dev(dev->parent, data);
4885 struct spi_device *spi;
4894 put_device(&ctlr->dev);
4900 spi = acpi_spi_find_device_by_adev(adev);
4901 if (!spi)
4904 spi_unregister_device(spi);
4905 put_device(&spi->dev);
4925 status = -ENOMEM;