Lines matching "coexist", "gpio" and "pin" in the Linux SPI core (drivers/spi/spi.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
9 #include <linux/clk/clk-conf.h>
13 #include <linux/dma-mapping.h>
15 #include <linux/gpio/consumer.h>
35 #include <linux/spi/spi-mem.h>
51 spi_controller_put(spi->controller);
52 kfree(spi->driver_override);
53 free_percpu(spi->pcpu_statistics);
63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 if (len != -ENODEV)
67 return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
78 ret = driver_set_override(dev, &spi->driver_override, buf, count);
92 len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
114 u64_stats_init(&stat->syncp);
135 start = u64_stats_fetch_begin(&pcpu_stats->syncp);
137 } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
150 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
161 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
317 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
325 u64_stats_update_begin(&stats->syncp);
327 u64_stats_inc(&stats->transfers);
328 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
330 u64_stats_add(&stats->bytes, xfer->len);
332 u64_stats_add(&stats->bytes_tx, xfer->len);
334 u64_stats_add(&stats->bytes_rx, xfer->len);
336 u64_stats_update_end(&stats->syncp);
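For illustration, the histogram bucket above is just the bit length of the transfer size, clamped to the table; a stand-alone sketch of that math, with a portable stand-in assumed for the kernel's fls():

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): index of the highest set bit */
static int fls_portable(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int lens[] = { 1, 3, 64, 5000 };

	for (int i = 0; i < 4; i++) {
		/*
		 * Buckets 0-1, 2-3, 4-7, ... as listed above; the kernel
		 * additionally clamps via min(fls(len), SPI_STATISTICS_HISTO_SIZE).
		 */
		int l2len = fls_portable(lens[i]) - 1;

		printf("len %5u -> transfer_bytes_histo[%d]\n", lens[i], l2len);
	}
	return 0;
}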
341 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
346 while (id->name[0]) {
347 if (!strcmp(name, id->name))
356 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
358 return spi_match_id(sdrv->id_table, sdev->modalias);
366 match = device_get_match_data(&sdev->dev);
370 return (const void *)spi_get_device_id(sdev)->driver_data;
380 if (spi->driver_override)
381 return strcmp(spi->driver_override, drv->name) == 0;
391 if (sdrv->id_table)
392 return !!spi_match_id(sdrv->id_table, spi->modalias);
394 return strcmp(spi->modalias, drv->name) == 0;
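For reference, a hedged sketch of the id_table that spi_match_id() walks above; the chip names are hypothetical:

#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id foo_spi_ids[] = {
	{ "foochip",  0 },	/* compared against spi->modalias */
	{ "foochip2", 1 },	/* .driver_data, returned via spi_get_device_id() */
	{ }			/* sentinel: name[0] == '\0' ends the walk */
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);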
403 if (rc != -ENODEV)
406 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
411 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
416 ret = of_clk_set_defaults(dev->of_node, false);
421 spi->irq = of_irq_get(dev->of_node, 0);
422 else if (is_acpi_device_node(fwnode) && spi->irq < 0)
423 spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
424 if (spi->irq == -EPROBE_DEFER)
425 return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
426 if (spi->irq < 0)
427 spi->irq = 0;
433 if (sdrv->probe) {
434 ret = sdrv->probe(spi);
444 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
446 if (sdrv->remove)
447 sdrv->remove(to_spi_device(dev));
454 if (dev->driver) {
455 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
457 if (sdrv->shutdown)
458 sdrv->shutdown(to_spi_device(dev));
474 * __spi_register_driver - register a SPI driver
483 sdrv->driver.owner = owner;
484 sdrv->driver.bus = &spi_bus_type;
491 if (sdrv->driver.of_match_table) {
494 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
499 of_name = strnchr(of_id->compatible,
500 sizeof(of_id->compatible), ',');
504 of_name = of_id->compatible;
506 if (sdrv->id_table) {
509 spi_id = spi_match_id(sdrv->id_table, of_name);
513 if (strcmp(sdrv->driver.name, of_name) == 0)
518 sdrv->driver.name, of_id->compatible);
522 return driver_register(&sdrv->driver);
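The loop above warns when the post-vendor part of an OF compatible has no id_table or driver-name match; a hedged skeleton that satisfies the check, reusing the hypothetical foo_spi_ids table from the earlier sketch:

static int foo_probe(struct spi_device *spi)
{
	return 0;	/* hypothetical: configure and register the chip */
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foochip" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foochip",	/* matches "acme,foochip" past the ',' */
		.of_match_table	= foo_of_match,
	},
	.id_table	= foo_spi_ids,
	.probe		= foo_probe,
};
module_spi_driver(foo_driver);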
526 /*-------------------------------------------------------------------------*/
530 * would make them board-specific. Similarly with SPI controller drivers.
531 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
551 * spi_alloc_device - Allocate a new SPI device
580 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
581 if (!spi->pcpu_statistics) {
587 spi->controller = ctlr;
588 spi->dev.parent = &ctlr->dev;
589 spi->dev.bus = &spi_bus_type;
590 spi->dev.release = spidev_release;
591 spi->mode = ctlr->buswidth_override_bits;
593 device_initialize(&spi->dev);
600 struct device *dev = &spi->dev;
604 dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
609 dev_set_name(dev, "spi-%pfwP", fwnode);
613 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
619 * logical CS in the spi->chip_select[]. If all the physical CS
622 * CS can be 0. As a solution to this issue initialize all the CS to -1.
623 * Now all the unused logical CS will have -1 physical CS value & can be
626 #define SPI_INVALID_CS ((s8)-1)
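A minimal sketch of the initialization the comment above describes, assuming the SPI_CS_CNT_MAX-sized chip_select[] array and the spi_set_chipselect() accessor:

u8 idx;

/* Mark every logical CS unused before the 'reg' property is parsed */
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
	spi_set_chipselect(spi, idx, SPI_INVALID_CS);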
645 return -EBUSY;
657 if (spi->controller == new_spi->controller) {
669 if (spi->controller->cleanup)
670 spi->controller->cleanup(spi);
675 struct spi_controller *ctlr = spi->controller;
676 struct device *dev = ctlr->dev.parent;
683 if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
685 ctlr->num_chipselect);
686 return -EINVAL;
692 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
716 !device_is_registered(&ctlr->dev)) {
717 return -ENODEV;
720 if (ctlr->cs_gpiods) {
726 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
733 * using SPI_CS_HIGH can't coexist well otherwise...
738 dev_name(&spi->dev), status);
743 status = device_add(&spi->dev);
746 dev_name(&spi->dev), status);
749 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
756 * spi_add_device - Add spi_device allocated with spi_alloc_device
766 struct spi_controller *ctlr = spi->controller;
772 mutex_lock(&ctlr->add_lock);
774 mutex_unlock(&ctlr->add_lock);
788 * spi_new_device - instantiate one new SPI device
794 * after board init creates the hard-wired devices. Some development
797 * driver could add devices (which it would learn about out-of-band).
808 * NOTE: caller did any chip->bus_num checks necessary.
811 * error-or-pointer (not NULL-or-pointer), troubleshootability
819 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
821 /* Use provided chip-select for proxy device */
823 spi_set_chipselect(proxy, 0, chip->chip_select);
825 proxy->max_speed_hz = chip->max_speed_hz;
826 proxy->mode = chip->mode;
827 proxy->irq = chip->irq;
828 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
829 proxy->dev.platform_data = (void *) chip->platform_data;
830 proxy->controller_data = chip->controller_data;
831 proxy->controller_state = NULL;
833 * By default spi->chip_select[0] will hold the physical CS number,
834 * so set bit 0 in spi->cs_index_mask.
836 proxy->cs_index_mask = BIT(0);
838 if (chip->swnode) {
839 status = device_add_software_node(&proxy->dev, chip->swnode);
841 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
842 chip->modalias, status);
854 device_remove_software_node(&proxy->dev);
861 * spi_unregister_device - unregister a single SPI device
874 fwnode = dev_fwnode(&spi->dev);
881 device_remove_software_node(&spi->dev);
882 device_del(&spi->dev);
884 put_device(&spi->dev);
893 if (ctlr->bus_num != bi->bus_num)
898 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
899 bi->modalias);
903 * spi_register_board_info - register SPI devices for a given board
908 * Board-specific early init code calls this (probably during arch_initcall)
912 * not make Linux forget about these hard-wired devices.
914 * Other code can also call this, e.g. a particular add-on board might provide
919 * any embedded pointers (platform_data, etc), they're copied as-is.
933 return -ENOMEM;
938 memcpy(&bi->board_info, info, sizeof(*info));
941 list_add_tail(&bi->list, &board_list);
944 &bi->board_info);
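A hedged example of the board-file registration described above; the modalias, bus number and chip select are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info foo_board_spi[] __initdata = {
	{
		.modalias	= "foochip",	/* binds to a matching spi_driver */
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

static int __init foo_board_init(void)
{
	/*
	 * Run early (e.g. at arch_initcall time) so the info is on
	 * board_list before the matching controller registers.
	 */
	return spi_register_board_info(foo_board_spi, ARRAY_SIZE(foo_board_spi));
}
arch_initcall(foo_board_init);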
951 /*-------------------------------------------------------------------------*/
956 * spi_res_alloc - allocate a spi resource that is life-cycle managed
978 INIT_LIST_HEAD(&sres->entry);
979 sres->release = release;
981 return sres->data;
985 * spi_res_free - free an SPI resource
992 WARN_ON(!list_empty(&sres->entry));
997 * spi_res_add - add a spi_res to the spi_message
1005 WARN_ON(!list_empty(&sres->entry));
1006 list_add_tail(&sres->entry, &message->resources);
1010 * spi_res_release - release all SPI resources for this message
1018 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1019 if (res->release)
1020 res->release(ctlr, message, res->data);
1022 list_del(&res->entry);
1028 /*-------------------------------------------------------------------------*/
1031 if (!(spi->cs_index_mask & BIT(idx))) {} else
1039 if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1048 * Historically ACPI has no means of expressing the GPIO polarity, and
1049 * thus the SPISerialBus() resource defines it on the per-chip
1050 * basis. In order to avoid a chain of negations, the GPIO
1053 * the GPIO CS polarity must be defined Active High to avoid
1057 if (is_acpi_device_node(dev_fwnode(&spi->dev)))
1060 /* Polarity handled by GPIO library */
1064 spi_delay_exec(&spi->cs_setup, NULL);
1066 spi_delay_exec(&spi->cs_inactive, NULL);
1078 if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1080 (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1082 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1087 spi->controller->last_cs_index_mask = spi->cs_index_mask;
1089 spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
1090 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1092 if (spi->mode & SPI_CS_HIGH)
1096 * Handle chip select delays for GPIO based CS or controllers without
1099 if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1100 spi_delay_exec(&spi->cs_hold, NULL);
1103 if (!(spi->mode & SPI_NO_CS)) {
1109 /* Some SPI masters need both GPIO CS & slave_select */
1110 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1111 spi->controller->set_cs)
1112 spi->controller->set_cs(spi, !enable);
1113 } else if (spi->controller->set_cs) {
1114 spi->controller->set_cs(spi, !enable);
1117 if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1119 spi_delay_exec(&spi->cs_setup, NULL);
1121 spi_delay_exec(&spi->cs_inactive, NULL);
1151 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1154 return -EINVAL;
1161 sg = &sgt->sgl[0];
1172 PAGE_SIZE - offset_in_page(buf)));
1179 return -ENOMEM;
1190 len -= min;
1217 sgt->orig_nents = 0;
1218 sgt->nents = 0;
1233 if (!ctlr->can_dma)
1236 if (ctlr->dma_tx)
1237 tx_dev = ctlr->dma_tx->device->dev;
1238 else if (ctlr->dma_map_dev)
1239 tx_dev = ctlr->dma_map_dev;
1241 tx_dev = ctlr->dev.parent;
1243 if (ctlr->dma_rx)
1244 rx_dev = ctlr->dma_rx->device->dev;
1245 else if (ctlr->dma_map_dev)
1246 rx_dev = ctlr->dma_map_dev;
1248 rx_dev = ctlr->dev.parent;
1250 ret = -ENOMSG;
1251 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1255 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1258 if (xfer->tx_buf != NULL) {
1259 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1260 (void *)xfer->tx_buf,
1261 xfer->len, DMA_TO_DEVICE,
1266 xfer->tx_sg_mapped = true;
1269 if (xfer->rx_buf != NULL) {
1270 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1271 xfer->rx_buf, xfer->len,
1275 &xfer->tx_sg, DMA_TO_DEVICE,
1281 xfer->rx_sg_mapped = true;
1288 ctlr->cur_rx_dma_dev = rx_dev;
1289 ctlr->cur_tx_dma_dev = tx_dev;
1296 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1297 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1300 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1304 if (xfer->rx_sg_mapped)
1305 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1307 xfer->rx_sg_mapped = false;
1309 if (xfer->tx_sg_mapped)
1310 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1312 xfer->tx_sg_mapped = false;
1321 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1322 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1324 if (xfer->tx_sg_mapped)
1325 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1326 if (xfer->rx_sg_mapped)
1327 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1333 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1334 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1336 if (xfer->rx_sg_mapped)
1337 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1338 if (xfer->tx_sg_mapped)
1339 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1370 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1375 if (xfer->tx_buf == ctlr->dummy_tx)
1376 xfer->tx_buf = NULL;
1377 if (xfer->rx_buf == ctlr->dummy_rx)
1378 xfer->rx_buf = NULL;
1390 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1391 && !(msg->spi->mode & SPI_3WIRE)) {
1395 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1396 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1397 !xfer->tx_buf)
1398 max_tx = max(xfer->len, max_tx);
1399 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1400 !xfer->rx_buf)
1401 max_rx = max(xfer->len, max_rx);
1405 tmp = krealloc(ctlr->dummy_tx, max_tx,
1408 return -ENOMEM;
1409 ctlr->dummy_tx = tmp;
1413 tmp = krealloc(ctlr->dummy_rx, max_rx,
1416 return -ENOMEM;
1417 ctlr->dummy_rx = tmp;
1421 list_for_each_entry(xfer, &msg->transfers,
1423 if (!xfer->len)
1425 if (!xfer->tx_buf)
1426 xfer->tx_buf = ctlr->dummy_tx;
1427 if (!xfer->rx_buf)
1428 xfer->rx_buf = ctlr->dummy_rx;
1440 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1441 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1442 u32 speed_hz = xfer->speed_hz;
1446 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1447 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1448 return -EINTR;
1460 ms = 8LL * MSEC_PER_SEC * xfer->len;
1471 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1477 dev_err(&msg->spi->dev,
1479 return -ETIMEDOUT;
1482 if (xfer->error & SPI_TRANS_FAIL_IO)
1483 return -EIO;
1507 u32 delay = _delay->value;
1508 u32 unit = _delay->unit;
1524 return -EINVAL;
1529 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1531 return -EINVAL;
1537 return -EINVAL;
1551 return -EINVAL;
1567 u32 delay = xfer->cs_change_delay.value;
1568 u32 unit = xfer->cs_change_delay.unit;
1571 /* Return early on "fast" mode - for everything but USECS */
1578 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1580 dev_err_once(&msg->spi->dev,
1595 * spi_transfer_one_message - Default implementation of transfer_one_message()
1607 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1608 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1610 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1611 spi_set_cs(msg->spi, !xfer->cs_off, false);
1616 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1622 if (!ctlr->ptp_sts_supported) {
1623 xfer->ptp_sts_word_pre = 0;
1624 ptp_read_system_prets(xfer->ptp_sts);
1627 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1628 reinit_completion(&ctlr->xfer_completion);
1632 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1636 if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1637 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1639 ctlr->fallback = true;
1640 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1648 dev_err(&msg->spi->dev,
1656 msg->status = ret;
1661 if (xfer->len)
1662 dev_err(&msg->spi->dev,
1664 xfer->len);
1667 if (!ctlr->ptp_sts_supported) {
1668 ptp_read_system_postts(xfer->ptp_sts);
1669 xfer->ptp_sts_word_post = xfer->len;
1674 if (msg->status != -EINPROGRESS)
1679 if (xfer->cs_change) {
1680 if (list_is_last(&xfer->transfer_list,
1681 &msg->transfers)) {
1684 if (!xfer->cs_off)
1685 spi_set_cs(msg->spi, false, false);
1687 if (!list_next_entry(xfer, transfer_list)->cs_off)
1688 spi_set_cs(msg->spi, true, false);
1690 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1691 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1692 spi_set_cs(msg->spi, xfer->cs_off, false);
1695 msg->actual_length += xfer->len;
1700 spi_set_cs(msg->spi, false, false);
1702 if (msg->status == -EINPROGRESS)
1703 msg->status = ret;
1705 if (msg->status && ctlr->handle_err)
1706 ctlr->handle_err(ctlr, msg);
1714 * spi_finalize_current_transfer - report completion of a transfer
1723 complete(&ctlr->xfer_completion);
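A transfer_one() that only starts the hardware returns a positive value and lets an interrupt report completion; a hedged sketch of that pairing, with register handling elided and names hypothetical:

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	/* ... ack the hardware status, drain the RX FIFO ... */

	/* Wakes the wait_for_completion() in spi_transfer_wait() above */
	spi_finalize_current_transfer(ctlr);
	return IRQ_HANDLED;
}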
1729 if (ctlr->auto_runtime_pm) {
1730 pm_runtime_mark_last_busy(ctlr->dev.parent);
1731 pm_runtime_put_autosuspend(ctlr->dev.parent);
1741 if (!was_busy && ctlr->auto_runtime_pm) {
1742 ret = pm_runtime_get_sync(ctlr->dev.parent);
1744 pm_runtime_put_noidle(ctlr->dev.parent);
1745 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1748 msg->status = ret;
1758 if (!was_busy && ctlr->prepare_transfer_hardware) {
1759 ret = ctlr->prepare_transfer_hardware(ctlr);
1761 dev_err(&ctlr->dev,
1765 if (ctlr->auto_runtime_pm)
1766 pm_runtime_put(ctlr->dev.parent);
1768 msg->status = ret;
1777 if (ctlr->prepare_message) {
1778 ret = ctlr->prepare_message(ctlr, msg);
1780 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1782 msg->status = ret;
1786 msg->prepared = true;
1791 msg->status = ret;
1796 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1797 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1798 xfer->ptp_sts_word_pre = 0;
1799 ptp_read_system_prets(xfer->ptp_sts);
1809 * ctlr->cur_msg.
1816 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1817 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1818 reinit_completion(&ctlr->cur_msg_completion);
1821 ret = ctlr->transfer_one_message(ctlr, msg);
1823 dev_err(&ctlr->dev,
1828 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1830 if (READ_ONCE(ctlr->cur_msg_incomplete))
1831 wait_for_completion(&ctlr->cur_msg_completion);
1837 * __spi_pump_messages - function which processes SPI message queue
1857 mutex_lock(&ctlr->io_mutex);
1860 spin_lock_irqsave(&ctlr->queue_lock, flags);
1863 if (ctlr->cur_msg)
1867 if (list_empty(&ctlr->queue) || !ctlr->running) {
1868 if (!ctlr->busy)
1871 /* Defer any non-atomic teardown to the thread */
1873 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1874 !ctlr->unprepare_transfer_hardware) {
1876 ctlr->busy = false;
1877 ctlr->queue_empty = true;
1880 kthread_queue_work(ctlr->kworker,
1881 &ctlr->pump_messages);
1886 ctlr->busy = false;
1887 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1889 kfree(ctlr->dummy_rx);
1890 ctlr->dummy_rx = NULL;
1891 kfree(ctlr->dummy_tx);
1892 ctlr->dummy_tx = NULL;
1893 if (ctlr->unprepare_transfer_hardware &&
1894 ctlr->unprepare_transfer_hardware(ctlr))
1895 dev_err(&ctlr->dev,
1900 spin_lock_irqsave(&ctlr->queue_lock, flags);
1901 ctlr->queue_empty = true;
1906 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1907 ctlr->cur_msg = msg;
1909 list_del_init(&msg->queue);
1910 if (ctlr->busy)
1913 ctlr->busy = true;
1914 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1917 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1919 ctlr->cur_msg = NULL;
1920 ctlr->fallback = false;
1922 mutex_unlock(&ctlr->io_mutex);
1930 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1931 mutex_unlock(&ctlr->io_mutex);
1935 * spi_pump_messages - kthread work function which processes spi message queue
1947 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1970 if (!xfer->ptp_sts)
1973 if (xfer->timestamped)
1976 if (progress > xfer->ptp_sts_word_pre)
1980 xfer->ptp_sts_word_pre = progress;
1983 local_irq_save(ctlr->irq_flags);
1987 ptp_read_system_prets(xfer->ptp_sts);
1992 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1996 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
2007 if (!xfer->ptp_sts)
2010 if (xfer->timestamped)
2013 if (progress < xfer->ptp_sts_word_post)
2016 ptp_read_system_postts(xfer->ptp_sts);
2019 local_irq_restore(ctlr->irq_flags);
2024 xfer->ptp_sts_word_post = progress;
2026 xfer->timestamped = 1;
2031 * spi_set_thread_rt - set the controller to pump at realtime priority
2035 * (by setting the ->rt value before calling spi_register_controller()) or
2047 dev_info(&ctlr->dev,
2049 sched_set_fifo(ctlr->kworker->task);
2054 ctlr->running = false;
2055 ctlr->busy = false;
2056 ctlr->queue_empty = true;
2058 ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2059 if (IS_ERR(ctlr->kworker)) {
2060 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2061 return PTR_ERR(ctlr->kworker);
2064 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2073 if (ctlr->rt)
2080 * spi_get_next_queued_message() - called by driver to check for queued
2095 spin_lock_irqsave(&ctlr->queue_lock, flags);
2096 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2098 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2105 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2117 struct spi_controller *ctlr = msg->spi->controller;
2119 if (ctlr->unoptimize_message)
2120 ctlr->unoptimize_message(msg);
2124 msg->optimized = false;
2125 msg->opt_state = NULL;
2129 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2137 if (!msg->pre_optimized && msg->optimized &&
2138 !msg->spi->controller->defer_optimize_message)
2143 * spi_finalize_current_message() - the current message is complete
2155 mesg = ctlr->cur_msg;
2157 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2158 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2159 ptp_read_system_postts(xfer->ptp_sts);
2160 xfer->ptp_sts_word_post = xfer->len;
2164 if (unlikely(ctlr->ptp_sts_supported))
2165 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2166 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2170 if (mesg->prepared && ctlr->unprepare_message) {
2171 ret = ctlr->unprepare_message(ctlr, mesg);
2173 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2178 mesg->prepared = false;
2182 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2184 if (READ_ONCE(ctlr->cur_msg_need_completion))
2185 complete(&ctlr->cur_msg_completion);
2189 mesg->state = NULL;
2190 if (mesg->complete)
2191 mesg->complete(mesg->context);
2199 spin_lock_irqsave(&ctlr->queue_lock, flags);
2201 if (ctlr->running || ctlr->busy) {
2202 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2203 return -EBUSY;
2206 ctlr->running = true;
2207 ctlr->cur_msg = NULL;
2208 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2210 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2222 * A wait_queue on the ctlr->busy could be used, but then the common
2227 spin_lock_irqsave(&ctlr->queue_lock, flags);
2228 if (list_empty(&ctlr->queue) && !ctlr->busy) {
2229 ctlr->running = false;
2230 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2233 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2235 } while (--limit);
2237 return -EBUSY;
2253 dev_err(&ctlr->dev, "problem destroying queue\n");
2257 kthread_destroy_worker(ctlr->kworker);
2266 struct spi_controller *ctlr = spi->controller;
2269 spin_lock_irqsave(&ctlr->queue_lock, flags);
2271 if (!ctlr->running) {
2272 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2273 return -ESHUTDOWN;
2275 msg->actual_length = 0;
2276 msg->status = -EINPROGRESS;
2278 list_add_tail(&msg->queue, &ctlr->queue);
2279 ctlr->queue_empty = false;
2280 if (!ctlr->busy && need_pump)
2281 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2283 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2288 * spi_queued_transfer - transfer function for queued transfers
2303 ctlr->transfer = spi_queued_transfer;
2304 if (!ctlr->transfer_one_message)
2305 ctlr->transfer_one_message = spi_transfer_one_message;
2310 dev_err(&ctlr->dev, "problem initializing queue\n");
2313 ctlr->queued = true;
2316 dev_err(&ctlr->dev, "problem starting queue\n");
2329 * spi_flush_queue - Send all pending messages in the queue from the callers'
2334 * sent before doing something. Is used by the spi-mem code to make sure SPI
2336 * before the spi-mem operation.
2340 if (ctlr->transfer == spi_queued_transfer)
2344 /*-------------------------------------------------------------------------*/
2354 delay->value = DIV_ROUND_UP(value, 1000);
2355 delay->unit = SPI_DELAY_UNIT_USECS;
2357 delay->value = value;
2358 delay->unit = SPI_DELAY_UNIT_NSECS;
2370 if (of_property_read_bool(nc, "spi-cpha"))
2371 spi->mode |= SPI_CPHA;
2372 if (of_property_read_bool(nc, "spi-cpol"))
2373 spi->mode |= SPI_CPOL;
2374 if (of_property_read_bool(nc, "spi-3wire"))
2375 spi->mode |= SPI_3WIRE;
2376 if (of_property_read_bool(nc, "spi-lsb-first"))
2377 spi->mode |= SPI_LSB_FIRST;
2378 if (of_property_read_bool(nc, "spi-cs-high"))
2379 spi->mode |= SPI_CS_HIGH;
2382 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2385 spi->mode |= SPI_NO_TX;
2390 spi->mode |= SPI_TX_DUAL;
2393 spi->mode |= SPI_TX_QUAD;
2396 spi->mode |= SPI_TX_OCTAL;
2399 dev_warn(&ctlr->dev,
2400 "spi-tx-bus-width %d not supported\n",
2406 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2409 spi->mode |= SPI_NO_RX;
2414 spi->mode |= SPI_RX_DUAL;
2417 spi->mode |= SPI_RX_QUAD;
2420 spi->mode |= SPI_RX_OCTAL;
2423 dev_warn(&ctlr->dev,
2424 "spi-rx-bus-width %d not supported\n",
2432 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2434 return -EINVAL;
2439 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2440 dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2441 return -EINVAL;
2450 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2454 if (rc > ctlr->num_chipselect) {
2455 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2459 if ((of_property_present(nc, "parallel-memories")) &&
2460 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2461 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2462 return -EINVAL;
2468 * By default spi->chip_select[0] will hold the physical CS number,
2469 * so set bit 0 in spi->cs_index_mask.
2471 spi->cs_index_mask = BIT(0);
2474 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2475 spi->max_speed_hz = value;
2478 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2479 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2480 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2494 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2495 rc = -ENOMEM;
2500 rc = of_alias_from_compatible(nc, spi->modalias,
2501 sizeof(spi->modalias));
2503 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2514 device_set_node(&spi->dev, of_fwnode_handle(nc));
2519 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2533 * of_register_spi_devices() - Register child devices onto the SPI bus
2544 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2549 dev_warn(&ctlr->dev,
2560 * spi_new_ancillary_device() - Register ancillary SPI device
2564 * Register an ancillary SPI device; for example some chips have a chip-select
2574 struct spi_controller *ctlr = spi->controller;
2581 rc = -ENOMEM;
2585 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2587 /* Use provided chip-select for ancillary device */
2592 ancillary->max_speed_hz = spi->max_speed_hz;
2593 ancillary->mode = spi->mode;
2595 * By default spi->chip_select[0] will hold the physical CS number,
2596 * so set bit 0 in spi->cs_index_mask.
2598 ancillary->cs_index_mask = BIT(0);
2600 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2605 dev_err(&spi->dev, "failed to register ancillary device\n");
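A hedged usage sketch for the ancillary helper above, from the probe of a chip that also listens on a second chip select (the CS number is hypothetical):

static int foo_dualcs_probe(struct spi_device *spi)
{
	struct spi_device *ancillary;

	/* Same chip, second CS - e.g. a separate command port */
	ancillary = spi_new_ancillary_device(spi, 1);
	if (IS_ERR(ancillary))
		return PTR_ERR(ancillary);

	spi_set_drvdata(spi, ancillary);
	return 0;
}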
2634 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2637 sb = &ares->data.spi_serial_bus;
2638 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2647 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2650 * Return: the number of SpiSerialBus resources in the ACPI-device's
2651 * resource-list; or a negative error code.
2678 && obj->buffer.length >= 4)
2679 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2682 && obj->buffer.length == 8)
2683 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2686 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2687 lookup->mode |= SPI_LSB_FIRST;
2690 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2691 lookup->mode |= SPI_CPOL;
2694 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2695 lookup->mode |= SPI_CPHA;
2701 struct spi_controller *ctlr = lookup->ctlr;
2703 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2708 sb = &ares->data.spi_serial_bus;
2709 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2711 if (lookup->index != -1 && lookup->n++ != lookup->index)
2715 sb->resource_source.string_ptr,
2719 return -ENODEV;
2722 if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2723 return -ENODEV;
2729 return -ENODEV;
2733 return -EPROBE_DEFER;
2735 lookup->ctlr = ctlr;
2742 * 0 .. max - 1 so we need to ask the driver to
2745 if (ctlr->fw_translate_cs) {
2746 int cs = ctlr->fw_translate_cs(ctlr,
2747 sb->device_selection);
2750 lookup->chip_select = cs;
2752 lookup->chip_select = sb->device_selection;
2755 lookup->max_speed_hz = sb->connection_speed;
2756 lookup->bits_per_word = sb->data_bit_length;
2758 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2759 lookup->mode |= SPI_CPHA;
2760 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2761 lookup->mode |= SPI_CPOL;
2762 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2763 lookup->mode |= SPI_CS_HIGH;
2765 } else if (lookup->irq < 0) {
2769 lookup->irq = r.start;
2777 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2787 * If index is set to -1, index is not used.
2788 * Note: If index is -1, ctlr must be set.
2802 if (!ctlr && index == -1)
2803 return ERR_PTR(-EINVAL);
2806 lookup.irq = -1;
2820 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2821 device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2827 return ERR_PTR(-ENODEV);
2831 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2832 dev_name(&adev->dev));
2833 return ERR_PTR(-ENOMEM);
2839 ACPI_COMPANION_SET(&spi->dev, adev);
2840 spi->max_speed_hz = lookup.max_speed_hz;
2841 spi->mode |= lookup.mode;
2842 spi->irq = lookup.irq;
2843 spi->bits_per_word = lookup.bits_per_word;
2845 * By default spi->chip_select[0] will hold the physical CS number,
2846 * so set bit 0 in spi->cs_index_mask.
2848 spi->cs_index_mask = BIT(0);
2859 if (acpi_bus_get_status(adev) || !adev->status.present ||
2863 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2865 if (PTR_ERR(spi) == -ENOMEM)
2871 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2872 sizeof(spi->modalias));
2876 adev->power.flags.ignore_parent = true;
2878 adev->power.flags.ignore_parent = false;
2879 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2880 dev_name(&adev->dev));
2906 handle = ACPI_HANDLE(ctlr->dev.parent);
2914 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2936 * spi_target_abort - abort the ongoing transfer request on an SPI slave
2942 struct spi_controller *ctlr = spi->controller;
2944 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2945 return ctlr->target_abort(ctlr);
2947 return -ENOTSUPP;
2958 child = device_find_any_child(&ctlr->dev);
2959 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2974 return -EINVAL;
2976 child = device_find_any_child(&ctlr->dev);
2987 return -ENOMEM;
2989 strscpy(spi->modalias, name, sizeof(spi->modalias));
3028 * __spi_alloc_controller - allocate an SPI master or slave controller
3030 * @size: how much zeroed driver-private data to allocate; the pointer to this
3065 device_initialize(&ctlr->dev);
3066 INIT_LIST_HEAD(&ctlr->queue);
3067 spin_lock_init(&ctlr->queue_lock);
3068 spin_lock_init(&ctlr->bus_lock_spinlock);
3069 mutex_init(&ctlr->bus_lock_mutex);
3070 mutex_init(&ctlr->io_mutex);
3071 mutex_init(&ctlr->add_lock);
3072 ctlr->bus_num = -1;
3073 ctlr->num_chipselect = 1;
3074 ctlr->slave = slave;
3076 ctlr->dev.class = &spi_slave_class;
3078 ctlr->dev.class = &spi_master_class;
3079 ctlr->dev.parent = dev;
3080 pm_suspend_ignore_children(&ctlr->dev, true);
3093 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3095 * @size: how much zeroed driver-private data to allocate
3120 ctlr->devm_allocated = true;
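A minimal host-driver probe built on the devm allocator above; everything foo_* is hypothetical, and foo_transfer_one() stands in for a real transfer_one implementation:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
			    struct spi_transfer *xfer);	/* assumed elsewhere */

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;			/* take a dynamic bus number */
	ctlr->num_chipselect = 1;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->transfer_one = foo_transfer_one;
	ctlr->dev.of_node = pdev->dev.of_node;

	/* Unregistration and the final put_device() are handled by devres */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}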
3132 * spi_get_gpio_descs() - grab chip select GPIOs for the master
3133 * @ctlr: The SPI master to grab GPIO descriptors for
3139 struct device *dev = &ctlr->dev;
3146 if (nb == -ENOENT)
3151 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3153 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3156 return -ENOMEM;
3157 ctlr->cs_gpiods = cs;
3174 * If we find a CS GPIO, name it after the device and
3182 return -ENOMEM;
3188 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3190 return -EINVAL;
3195 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3197 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3198 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3200 return -EINVAL;
3209 * The controller may implement only the high-level SPI-memory like
3212 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3213 * one of the ->transfer_xxx() methods be implemented.
3215 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3216 if (!ctlr->transfer && !ctlr->transfer_one &&
3217 !ctlr->transfer_one_message) {
3218 return -EINVAL;
3234 return id == -ENOSPC ? -EBUSY : id;
3235 ctlr->bus_num = id;
3240 * spi_register_controller - register SPI host or target controller
3245 * SPI controllers connect to their drivers using some non-SPI bus,
3250 * and board-specific addressing for SPI devices combines those numbers
3264 struct device *dev = ctlr->dev.parent;
3271 return -ENODEV;
3281 if (ctlr->bus_num < 0)
3282 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3283 if (ctlr->bus_num >= 0) {
3284 /* Devices with a fixed bus num must check in with that num */
3285 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3289 if (ctlr->bus_num < 0) {
3300 ctlr->bus_lock_flag = 0;
3301 init_completion(&ctlr->xfer_completion);
3302 init_completion(&ctlr->cur_msg_completion);
3303 if (!ctlr->max_dma_len)
3304 ctlr->max_dma_len = INT_MAX;
3310 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3312 if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3317 * A controller using GPIO descriptors always
3320 ctlr->mode_bits |= SPI_CS_HIGH;
3324 * Even if it's just one always-selected device, there must
3327 if (!ctlr->num_chipselect) {
3328 status = -EINVAL;
3334 ctlr->last_cs[idx] = SPI_INVALID_CS;
3336 status = device_add(&ctlr->dev);
3341 dev_name(&ctlr->dev));
3345 * need the queueing logic if the driver is only supporting high-level
3348 if (ctlr->transfer) {
3350 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3353 device_del(&ctlr->dev);
3358 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3359 if (!ctlr->pcpu_statistics) {
3360 dev_err(dev, "Error allocating per-cpu statistics\n");
3361 status = -ENOMEM;
3366 list_add_tail(&ctlr->list, &spi_controller_list);
3368 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3380 idr_remove(&spi_master_idr, ctlr->bus_num);
3392 * devm_spi_register_controller - register managed SPI host or target
3412 return -ENOMEM;
3433 * spi_unregister_controller - unregister SPI master or slave controller
3447 int id = ctlr->bus_num;
3451 mutex_lock(&ctlr->add_lock);
3453 device_for_each_child(&ctlr->dev, NULL, __unregister);
3459 if (ctlr->queued) {
3461 dev_err(&ctlr->dev, "queue remove failed\n");
3464 list_del(&ctlr->list);
3467 device_del(&ctlr->dev);
3476 mutex_unlock(&ctlr->add_lock);
3482 if (!ctlr->devm_allocated)
3483 put_device(&ctlr->dev);
3489 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3494 mutex_lock(&ctlr->bus_lock_mutex);
3495 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3496 mutex_unlock(&ctlr->bus_lock_mutex);
3501 mutex_lock(&ctlr->bus_lock_mutex);
3502 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3503 mutex_unlock(&ctlr->bus_lock_mutex);
3510 /* Basically no-ops for non-queued controllers */
3511 if (ctlr->queued) {
3514 dev_err(&ctlr->dev, "queue stop failed\n");
3528 if (ctlr->queued) {
3531 dev_err(&ctlr->dev, "queue restart failed\n");
3537 /*-------------------------------------------------------------------------*/
3549 if (rxfer->release)
3550 rxfer->release(ctlr, msg, res);
3553 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3556 for (i = 0; i < rxfer->inserted; i++)
3557 list_del(&rxfer->inserted_transfers[i].transfer_list);
3561 * spi_replace_transfers - replace transfers with several transfers
3589 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3594 return ERR_PTR(-ENOMEM);
3597 rxfer->release = release;
3601 rxfer->extradata =
3602 &rxfer->inserted_transfers[insert];
3605 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3609 * the @replaced_transfers - it may be spi_message.transfers!
3611 rxfer->replaced_after = xfer_first->transfer_list.prev;
3616 * If the entry after replaced_after is msg->transfers
3620 if (rxfer->replaced_after->next == &msg->transfers) {
3621 dev_err(&msg->spi->dev,
3624 list_splice(&rxfer->replaced_transfers,
3625 rxfer->replaced_after);
3631 return ERR_PTR(-EINVAL);
3638 list_move_tail(rxfer->replaced_after->next,
3639 &rxfer->replaced_transfers);
3648 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3654 list_add(&xfer->transfer_list, rxfer->replaced_after);
3658 xfer->cs_change = false;
3659 xfer->delay.value = 0;
3664 rxfer->inserted = insert;
3683 count = DIV_ROUND_UP(xfer->len, maxsize);
3689 xfers = srt->inserted_transfers;
3714 xfers[i].len = min(maxsize, xfers[i].len - offset);
3721 *xferp = &xfers[count - 1];
3724 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3726 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3733 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3760 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3761 if (xfer->len > maxsize) {
3775 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3801 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3805 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3806 if (xfer->len > maxsize) {
3818 /*-------------------------------------------------------------------------*/
3828 if (ctlr->bits_per_word_mask) {
3831 return -EINVAL;
3832 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3833 return -EINVAL;
3840 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3847 struct device *parent = spi->controller->dev.parent;
3850 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3851 if (spi->controller->auto_runtime_pm) {
3855 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3860 status = spi->controller->set_cs_timing(spi);
3864 status = spi->controller->set_cs_timing(spi);
3871 * spi_setup - setup SPI mode and clock rate
3886 * LSB-first wire encoding, or active-high chipselects.
3899 if ((hweight_long(spi->mode &
3901 (hweight_long(spi->mode &
3903 dev_err(&spi->dev,
3904 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3905 return -EINVAL;
3908 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3911 return -EINVAL;
3913 if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3914 dev_err(&spi->dev,
3916 return -EINVAL;
3924 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3930 dev_warn(&spi->dev,
3933 spi->mode &= ~ugly_bits;
3937 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3939 return -EINVAL;
3942 if (!spi->bits_per_word) {
3943 spi->bits_per_word = 8;
3946 * Some controllers may not support the default 8 bits-per-word
3949 status = __spi_validate_bits_per_word(spi->controller,
3950 spi->bits_per_word);
3955 if (spi->controller->max_speed_hz &&
3956 (!spi->max_speed_hz ||
3957 spi->max_speed_hz > spi->controller->max_speed_hz))
3958 spi->max_speed_hz = spi->controller->max_speed_hz;
3960 mutex_lock(&spi->controller->io_mutex);
3962 if (spi->controller->setup) {
3963 status = spi->controller->setup(spi);
3965 mutex_unlock(&spi->controller->io_mutex);
3966 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3974 mutex_unlock(&spi->controller->io_mutex);
3978 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3979 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3981 mutex_unlock(&spi->controller->io_mutex);
3982 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3990 * checking for a non-zero return value instead of a negative
3996 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3997 pm_runtime_put_autosuspend(spi->controller->dev.parent);
4002 mutex_unlock(&spi->controller->io_mutex);
4004 if (spi->rt && !spi->controller->rt) {
4005 spi->controller->rt = true;
4006 spi_set_thread_rt(spi->controller);
4011 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4012 spi->mode & SPI_MODE_X_MASK,
4013 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4014 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4015 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
4016 (spi->mode & SPI_LOOP) ? "loopback, " : "",
4017 spi->bits_per_word, spi->max_speed_hz,
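A hedged example of a peripheral driver invoking spi_setup() as documented above; the 10 MHz cap is a hypothetical part-specific limit:

static int foo_setup_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_MODE_3;			/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 8;
	spi->max_speed_hz = 10 * 1000 * 1000;

	ret = spi_setup(spi);
	if (ret)
		return dev_err_probe(&spi->dev, ret, "spi_setup() failed\n");
	return 0;
}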
4029 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4033 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4038 memcpy(&xfer->word_delay, &spi->word_delay,
4039 sizeof(xfer->word_delay));
4046 struct spi_controller *ctlr = spi->controller;
4050 if (list_empty(&message->transfers))
4051 return -EINVAL;
4053 message->spi = spi;
4056 * Half-duplex links include original MicroWire, and ones with
4057 * only one data pin like SPI_3WIRE (switches direction) or where
4061 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4062 (spi->mode & SPI_3WIRE)) {
4063 unsigned flags = ctlr->flags;
4065 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4066 if (xfer->rx_buf && xfer->tx_buf)
4067 return -EINVAL;
4068 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4069 return -EINVAL;
4070 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4071 return -EINVAL;
4083 message->frame_length = 0;
4084 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4085 xfer->effective_speed_hz = 0;
4086 message->frame_length += xfer->len;
4087 if (!xfer->bits_per_word)
4088 xfer->bits_per_word = spi->bits_per_word;
4090 if (!xfer->speed_hz)
4091 xfer->speed_hz = spi->max_speed_hz;
4093 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4094 xfer->speed_hz = ctlr->max_speed_hz;
4096 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4097 return -EINVAL;
4101 * where the SPI word size should be a power-of-two multiple.
4103 if (xfer->bits_per_word <= 8)
4105 else if (xfer->bits_per_word <= 16)
4111 if (xfer->len % w_size)
4112 return -EINVAL;
4114 if (xfer->speed_hz && ctlr->min_speed_hz &&
4115 xfer->speed_hz < ctlr->min_speed_hz)
4116 return -EINVAL;
4118 if (xfer->tx_buf && !xfer->tx_nbits)
4119 xfer->tx_nbits = SPI_NBITS_SINGLE;
4120 if (xfer->rx_buf && !xfer->rx_nbits)
4121 xfer->rx_nbits = SPI_NBITS_SINGLE;
4127 if (xfer->tx_buf) {
4128 if (spi->mode & SPI_NO_TX)
4129 return -EINVAL;
4130 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4131 xfer->tx_nbits != SPI_NBITS_DUAL &&
4132 xfer->tx_nbits != SPI_NBITS_QUAD &&
4133 xfer->tx_nbits != SPI_NBITS_OCTAL)
4134 return -EINVAL;
4135 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4136 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4137 return -EINVAL;
4138 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4139 !(spi->mode & SPI_TX_QUAD))
4140 return -EINVAL;
4143 if (xfer->rx_buf) {
4144 if (spi->mode & SPI_NO_RX)
4145 return -EINVAL;
4146 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4147 xfer->rx_nbits != SPI_NBITS_DUAL &&
4148 xfer->rx_nbits != SPI_NBITS_QUAD &&
4149 xfer->rx_nbits != SPI_NBITS_OCTAL)
4150 return -EINVAL;
4151 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4152 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4153 return -EINVAL;
4154 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4155 !(spi->mode & SPI_RX_QUAD))
4156 return -EINVAL;
4160 return -EINVAL;
4163 message->status = -EINPROGRESS;
4169 * spi_split_transfers - generic handling of transfer splitting
4185 struct spi_controller *ctlr = msg->spi->controller;
4191 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4192 * for the CS line, we can emulate the CS-per-word hardware function by
4193 * splitting transfers into one-word transfers and ensuring that
4196 if ((msg->spi->mode & SPI_CS_WORD) &&
4197 (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4202 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4204 if (list_is_last(&xfer->transfer_list, &msg->transfers))
4207 xfer->cs_change = 1;
4211 spi_max_transfer_size(msg->spi));
4220 * __spi_optimize_message - shared implementation for spi_optimize_message()
4235 struct spi_controller *ctlr = spi->controller;
4246 if (ctlr->optimize_message) {
4247 ret = ctlr->optimize_message(msg);
4254 msg->optimized = true;
4260 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4268 if (spi->controller->defer_optimize_message) {
4269 msg->spi = spi;
4273 if (msg->pre_optimized)
4280 * spi_optimize_message - do any one-time validation and setup for a SPI message
4290 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4304 * Pre-optimization is not supported and optimization is deferred e.g.
4305 * when using spi-mux.
4307 if (spi->controller->defer_optimize_message)
4320 msg->pre_optimized = true;
4327 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4336 if (msg->spi->controller->defer_optimize_message)
4340 msg->pre_optimized = false;
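A hedged usage sketch of the optimize API above (available in recent kernels), for one message submitted many times:

static int foo_sample_many(struct spi_device *spi, struct spi_message *msg,
			   int n_samples)
{
	int i, ret;

	ret = spi_optimize_message(spi, msg);	/* one-time validation/setup */
	if (ret)
		return ret;

	for (i = 0; i < n_samples; i++) {
		ret = spi_sync(spi, msg);	/* hot path skips re-validation */
		if (ret)
			break;
	}

	spi_unoptimize_message(msg);		/* drop optimize-time resources */
	return ret;
}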
4346 struct spi_controller *ctlr = spi->controller;
4353 if (!ctlr->transfer)
4354 return -ENOTSUPP;
4356 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4357 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4361 if (!ctlr->ptp_sts_supported) {
4362 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4363 xfer->ptp_sts_word_pre = 0;
4364 ptp_read_system_prets(xfer->ptp_sts);
4368 return ctlr->transfer(spi, message);
4377 * devm_spi_optimize_message - managed version of spi_optimize_message()
4378 * @dev: the device that manages @msg (usually @spi->dev)
4400 * spi_async - asynchronous SPI transfer
4409 * Before that invocation, the value of message->status is undefined.
4410 * When the callback is issued, message->status holds either zero (to
4432 struct spi_controller *ctlr = spi->controller;
4440 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4442 if (ctlr->bus_lock_flag)
4443 ret = -EBUSY;
4447 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
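A hedged sketch of the completion-callback pattern spi_async() expects - essentially the plumbing __spi_sync() itself sets up further down:

#include <linux/completion.h>
#include <linux/spi/spi.h>

static void foo_done(void *context)
{
	complete(context);	/* runs in controller context, possibly in IRQ */
}

static int foo_xfer_async(struct spi_device *spi, struct spi_message *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	msg->complete = foo_done;
	msg->context = &done;

	ret = spi_async(spi, msg);	/* may be called from atomic context */
	if (ret)
		return ret;

	wait_for_completion(&done);
	return msg->status;		/* set by the core before the callback */
}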
4458 mutex_lock(&ctlr->io_mutex);
4460 was_busy = ctlr->busy;
4462 ctlr->cur_msg = msg;
4465 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4466 ctlr->cur_msg = NULL;
4467 ctlr->fallback = false;
4470 kfree(ctlr->dummy_rx);
4471 ctlr->dummy_rx = NULL;
4472 kfree(ctlr->dummy_tx);
4473 ctlr->dummy_tx = NULL;
4474 if (ctlr->unprepare_transfer_hardware &&
4475 ctlr->unprepare_transfer_hardware(ctlr))
4476 dev_err(&ctlr->dev,
4481 mutex_unlock(&ctlr->io_mutex);
4484 /*-------------------------------------------------------------------------*/
4502 struct spi_controller *ctlr = spi->controller;
4505 dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4506 return -ESHUTDOWN;
4513 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4514 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4522 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4523 message->actual_length = 0;
4524 message->status = -EINPROGRESS;
4528 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4529 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4533 return message->status;
4542 message->complete = spi_complete;
4543 message->context = &done;
4545 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4547 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4551 status = message->status;
4553 message->complete = NULL;
4554 message->context = NULL;
4560 * spi_sync - blocking/synchronous SPI data transfers
4566 * is non-interruptible, and has no timeout. Low-overhead controller
4571 * frequently-used devices may want to minimize costs of selecting a chip,
4584 mutex_lock(&spi->controller->bus_lock_mutex);
4586 mutex_unlock(&spi->controller->bus_lock_mutex);
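A hedged example of building and submitting a synchronous message as above; the opcode is hypothetical, and the command byte is kmalloc'd because spi_sync() buffers must be DMA-safe:

#include <linux/slab.h>
#include <linux/spi/spi.h>

static int foo_read_data(struct spi_device *spi, void *rxbuf, size_t len)
{
	struct spi_transfer xfers[2] = { };
	struct spi_message msg;
	u8 *cmd;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	*cmd = 0x0b;			/* hypothetical read opcode */

	xfers[0].tx_buf = cmd;
	xfers[0].len = 1;
	xfers[1].rx_buf = rxbuf;	/* caller supplies a DMA-safe buffer */
	xfers[1].len = len;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	ret = spi_sync(spi, &msg);
	kfree(cmd);
	return ret;
}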
4593 * spi_sync_locked - version of spi_sync with exclusive bus usage
4599 * is non-interruptible, and has no timeout. Low-overhead controller
4615 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4620 * is non-interruptible, and has no timeout.
4633 mutex_lock(&ctlr->bus_lock_mutex);
4635 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4636 ctlr->bus_lock_flag = 1;
4637 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4646 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4651 * is non-interruptible, and has no timeout.
4660 ctlr->bus_lock_flag = 0;
4662 mutex_unlock(&ctlr->bus_lock_mutex);
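A hedged sketch of the lock/unlock pairing above, keeping two messages back-to-back on the bus with no other device interleaved:

static int foo_paired_xfer(struct spi_device *spi,
			   struct spi_message *m1, struct spi_message *m2)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);		/* exclude other devices on this bus */
	ret = spi_sync_locked(spi, m1);
	if (!ret)
		ret = spi_sync_locked(spi, m2);
	spi_bus_unlock(ctlr);
	return ret;
}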
4674 * spi_write_then_read - SPI synchronous write followed by read
4676 * @txbuf: data to be written (need not be DMA-safe)
4678 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4688 * Performance-sensitive or bulk transfer code should instead use
4689 * spi_{async,sync}() calls with DMA-safe buffers.
4705 * Use preallocated DMA-safe buffer if we can. We can't avoid
4708 * using the pre-allocated buffer or the transfer is too large.
4714 return -ENOMEM;
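A hedged one-register read built on the helper above; because the helper copies through its own DMA-safe buffer, plain stack variables are fine here:

static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	return spi_write_then_read(spi, &reg, 1, val, 1);
}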
4748 /*-------------------------------------------------------------------------*/
4783 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4787 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4788 put_device(&ctlr->dev);
4796 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4797 spi = of_register_spi_device(ctlr, rd->dn);
4798 put_device(&ctlr->dev);
4802 __func__, rd->dn);
4803 of_node_clear_flag(rd->dn, OF_POPULATED);
4810 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4814 spi = of_find_spi_device_by_node(rd->dn);
4822 put_device(&spi->dev);
4839 return device_match_acpi_dev(dev->parent, data);
4880 put_device(&ctlr->dev);
4891 put_device(&spi->dev);
4911 status = -ENOMEM;