1b445bfcbSMarco Felsch // SPDX-License-Identifier: GPL-2.0-or-later 2787f4889SMark Brown // SPI init/core code 3787f4889SMark Brown // 4787f4889SMark Brown // Copyright (C) 2005 David Brownell 5787f4889SMark Brown // Copyright (C) 2008 Secret Lab Technologies Ltd. 68ae12a0dSDavid Brownell 78ae12a0dSDavid Brownell #include <linux/kernel.h> 88ae12a0dSDavid Brownell #include <linux/device.h> 98ae12a0dSDavid Brownell #include <linux/init.h> 108ae12a0dSDavid Brownell #include <linux/cache.h> 1199adef31SMark Brown #include <linux/dma-mapping.h> 1299adef31SMark Brown #include <linux/dmaengine.h> 1394040828SMatthias Kaehlcke #include <linux/mutex.h> 142b7a32f7SSinan Akman #include <linux/of_device.h> 15d57a4282SGrant Likely #include <linux/of_irq.h> 1686be408bSSylwester Nawrocki #include <linux/clk/clk-conf.h> 175a0e3ad6STejun Heo #include <linux/slab.h> 18e0626e38SAnton Vorontsov #include <linux/mod_devicetable.h> 198ae12a0dSDavid Brownell #include <linux/spi/spi.h> 20b5932f5cSBoris Brezillon #include <linux/spi/spi-mem.h> 2174317984SJean-Christophe PLAGNIOL-VILLARD #include <linux/of_gpio.h> 22f3186dd8SLinus Walleij #include <linux/gpio/consumer.h> 233ae22e8cSMark Brown #include <linux/pm_runtime.h> 24f48c767cSUlf Hansson #include <linux/pm_domain.h> 25826cf175SDmitry Torokhov #include <linux/property.h> 26025ed130SPaul Gortmaker #include <linux/export.h> 278bd75c77SClark Williams #include <linux/sched/rt.h> 28ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h> 29ffbbdd21SLinus Walleij #include <linux/delay.h> 30ffbbdd21SLinus Walleij #include <linux/kthread.h> 3164bee4d2SMika Westerberg #include <linux/ioport.h> 3264bee4d2SMika Westerberg #include <linux/acpi.h> 33b1b8153cSVignesh R #include <linux/highmem.h> 349b61e302SSuniel Mahesh #include <linux/idr.h> 358a2e487eSLukas Wunner #include <linux/platform_data/x86/apple.h> 368ae12a0dSDavid Brownell 3756ec1978SMark Brown #define CREATE_TRACE_POINTS 3856ec1978SMark Brown #include <trace/events/spi.h> 39ca1438dcSArnd Bergmann EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start); 40ca1438dcSArnd Bergmann EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); 419b61e302SSuniel Mahesh 4246336966SBoris Brezillon #include "internals.h" 4346336966SBoris Brezillon 449b61e302SSuniel Mahesh static DEFINE_IDR(spi_master_idr); 4556ec1978SMark Brown 468ae12a0dSDavid Brownell static void spidev_release(struct device *dev) 478ae12a0dSDavid Brownell { 480ffa0285SHans-Peter Nilsson struct spi_device *spi = to_spi_device(dev); 498ae12a0dSDavid Brownell 508caab75fSGeert Uytterhoeven /* spi controllers may cleanup for released devices */ 518caab75fSGeert Uytterhoeven if (spi->controller->cleanup) 528caab75fSGeert Uytterhoeven spi->controller->cleanup(spi); 538ae12a0dSDavid Brownell 548caab75fSGeert Uytterhoeven spi_controller_put(spi->controller); 555039563eSTrent Piepho kfree(spi->driver_override); 5607a389feSRoman Tereshonkov kfree(spi); 578ae12a0dSDavid Brownell } 588ae12a0dSDavid Brownell 598ae12a0dSDavid Brownell static ssize_t 608ae12a0dSDavid Brownell modalias_show(struct device *dev, struct device_attribute *a, char *buf) 618ae12a0dSDavid Brownell { 628ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 638c4ff6d0SZhang Rui int len; 648c4ff6d0SZhang Rui 658c4ff6d0SZhang Rui len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 668c4ff6d0SZhang Rui if (len != -ENODEV) 678c4ff6d0SZhang Rui return len; 688ae12a0dSDavid Brownell 69d8e328b3SGrant Likely return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 708ae12a0dSDavid Brownell } 
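
/*
 * Read-only "modalias" attribute: for ACPI-enumerated devices this reports
 * the ACPI modalias, otherwise "spi:" (SPI_MODULE_PREFIX) followed by
 * spi->modalias.
 */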
71aa7da564SGreg Kroah-Hartman static DEVICE_ATTR_RO(modalias); 728ae12a0dSDavid Brownell 735039563eSTrent Piepho static ssize_t driver_override_store(struct device *dev, 745039563eSTrent Piepho struct device_attribute *a, 755039563eSTrent Piepho const char *buf, size_t count) 765039563eSTrent Piepho { 775039563eSTrent Piepho struct spi_device *spi = to_spi_device(dev); 785039563eSTrent Piepho const char *end = memchr(buf, '\n', count); 795039563eSTrent Piepho const size_t len = end ? end - buf : count; 805039563eSTrent Piepho const char *driver_override, *old; 815039563eSTrent Piepho 825039563eSTrent Piepho /* We need to keep extra room for a newline when displaying value */ 835039563eSTrent Piepho if (len >= (PAGE_SIZE - 1)) 845039563eSTrent Piepho return -EINVAL; 855039563eSTrent Piepho 865039563eSTrent Piepho driver_override = kstrndup(buf, len, GFP_KERNEL); 875039563eSTrent Piepho if (!driver_override) 885039563eSTrent Piepho return -ENOMEM; 895039563eSTrent Piepho 905039563eSTrent Piepho device_lock(dev); 915039563eSTrent Piepho old = spi->driver_override; 925039563eSTrent Piepho if (len) { 935039563eSTrent Piepho spi->driver_override = driver_override; 945039563eSTrent Piepho } else { 95be73e323SAndy Shevchenko /* Empty string, disable driver override */ 965039563eSTrent Piepho spi->driver_override = NULL; 975039563eSTrent Piepho kfree(driver_override); 985039563eSTrent Piepho } 995039563eSTrent Piepho device_unlock(dev); 1005039563eSTrent Piepho kfree(old); 1015039563eSTrent Piepho 1025039563eSTrent Piepho return count; 1035039563eSTrent Piepho } 1045039563eSTrent Piepho 1055039563eSTrent Piepho static ssize_t driver_override_show(struct device *dev, 1065039563eSTrent Piepho struct device_attribute *a, char *buf) 1075039563eSTrent Piepho { 1085039563eSTrent Piepho const struct spi_device *spi = to_spi_device(dev); 1095039563eSTrent Piepho ssize_t len; 1105039563eSTrent Piepho 1115039563eSTrent Piepho device_lock(dev); 1125039563eSTrent Piepho len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? 
: ""); 1135039563eSTrent Piepho device_unlock(dev); 1145039563eSTrent Piepho return len; 1155039563eSTrent Piepho } 1165039563eSTrent Piepho static DEVICE_ATTR_RW(driver_override); 1175039563eSTrent Piepho 118eca2ebc7SMartin Sperl #define SPI_STATISTICS_ATTRS(field, file) \ 1198caab75fSGeert Uytterhoeven static ssize_t spi_controller_##field##_show(struct device *dev, \ 120eca2ebc7SMartin Sperl struct device_attribute *attr, \ 121eca2ebc7SMartin Sperl char *buf) \ 122eca2ebc7SMartin Sperl { \ 1238caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, \ 1248caab75fSGeert Uytterhoeven struct spi_controller, dev); \ 1258caab75fSGeert Uytterhoeven return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 126eca2ebc7SMartin Sperl } \ 1278caab75fSGeert Uytterhoeven static struct device_attribute dev_attr_spi_controller_##field = { \ 128ad25c92eSGeert Uytterhoeven .attr = { .name = file, .mode = 0444 }, \ 1298caab75fSGeert Uytterhoeven .show = spi_controller_##field##_show, \ 130eca2ebc7SMartin Sperl }; \ 131eca2ebc7SMartin Sperl static ssize_t spi_device_##field##_show(struct device *dev, \ 132eca2ebc7SMartin Sperl struct device_attribute *attr, \ 133eca2ebc7SMartin Sperl char *buf) \ 134eca2ebc7SMartin Sperl { \ 135d1eba93bSGeliang Tang struct spi_device *spi = to_spi_device(dev); \ 136eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&spi->statistics, buf); \ 137eca2ebc7SMartin Sperl } \ 138eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_device_##field = { \ 139ad25c92eSGeert Uytterhoeven .attr = { .name = file, .mode = 0444 }, \ 140eca2ebc7SMartin Sperl .show = spi_device_##field##_show, \ 141eca2ebc7SMartin Sperl } 142eca2ebc7SMartin Sperl 143eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 144eca2ebc7SMartin Sperl static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 145eca2ebc7SMartin Sperl char *buf) \ 146eca2ebc7SMartin Sperl { \ 147eca2ebc7SMartin Sperl unsigned long flags; \ 148eca2ebc7SMartin Sperl ssize_t len; \ 149eca2ebc7SMartin Sperl spin_lock_irqsave(&stat->lock, flags); \ 150eca2ebc7SMartin Sperl len = sprintf(buf, format_string, stat->field); \ 151eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stat->lock, flags); \ 152eca2ebc7SMartin Sperl return len; \ 153eca2ebc7SMartin Sperl } \ 154eca2ebc7SMartin Sperl SPI_STATISTICS_ATTRS(name, file) 155eca2ebc7SMartin Sperl 156eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW(field, format_string) \ 157eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 158eca2ebc7SMartin Sperl field, format_string) 159eca2ebc7SMartin Sperl 160eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(messages, "%lu"); 161eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(transfers, "%lu"); 162eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(errors, "%lu"); 163eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(timedout, "%lu"); 164eca2ebc7SMartin Sperl 165eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync, "%lu"); 166eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 167eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_async, "%lu"); 168eca2ebc7SMartin Sperl 169eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes, "%llu"); 170eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 171eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 172eca2ebc7SMartin Sperl 1736b7bc061SMartin Sperl #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 1746b7bc061SMartin Sperl SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 
1756b7bc061SMartin Sperl "transfer_bytes_histo_" number, \ 1766b7bc061SMartin Sperl transfer_bytes_histo[index], "%lu") 1776b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 1786b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 1796b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 1806b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 1816b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 1826b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 1836b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 1846b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 1856b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 1866b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 1876b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 1886b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 1896b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 1906b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 1916b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 1926b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 1936b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 1946b7bc061SMartin Sperl 195d9f12122SMartin Sperl SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 196d9f12122SMartin Sperl 197aa7da564SGreg Kroah-Hartman static struct attribute *spi_dev_attrs[] = { 198aa7da564SGreg Kroah-Hartman &dev_attr_modalias.attr, 1995039563eSTrent Piepho &dev_attr_driver_override.attr, 200aa7da564SGreg Kroah-Hartman NULL, 2018ae12a0dSDavid Brownell }; 202eca2ebc7SMartin Sperl 203eca2ebc7SMartin Sperl static const struct attribute_group spi_dev_group = { 204eca2ebc7SMartin Sperl .attrs = spi_dev_attrs, 205eca2ebc7SMartin Sperl }; 206eca2ebc7SMartin Sperl 207eca2ebc7SMartin Sperl static struct attribute *spi_device_statistics_attrs[] = { 208eca2ebc7SMartin Sperl &dev_attr_spi_device_messages.attr, 209eca2ebc7SMartin Sperl &dev_attr_spi_device_transfers.attr, 210eca2ebc7SMartin Sperl &dev_attr_spi_device_errors.attr, 211eca2ebc7SMartin Sperl &dev_attr_spi_device_timedout.attr, 212eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync.attr, 213eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync_immediate.attr, 214eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_async.attr, 215eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes.attr, 216eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_rx.attr, 217eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_tx.attr, 2186b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo0.attr, 2196b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo1.attr, 2206b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo2.attr, 2216b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo3.attr, 2226b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo4.attr, 2236b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo5.attr, 2246b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo6.attr, 2256b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo7.attr, 2266b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo8.attr, 2276b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo9.attr, 2286b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo10.attr, 2296b7bc061SMartin Sperl 
&dev_attr_spi_device_transfer_bytes_histo11.attr, 2306b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo12.attr, 2316b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo13.attr, 2326b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo14.attr, 2336b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo15.attr, 2346b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo16.attr, 235d9f12122SMartin Sperl &dev_attr_spi_device_transfers_split_maxsize.attr, 236eca2ebc7SMartin Sperl NULL, 237eca2ebc7SMartin Sperl }; 238eca2ebc7SMartin Sperl 239eca2ebc7SMartin Sperl static const struct attribute_group spi_device_statistics_group = { 240eca2ebc7SMartin Sperl .name = "statistics", 241eca2ebc7SMartin Sperl .attrs = spi_device_statistics_attrs, 242eca2ebc7SMartin Sperl }; 243eca2ebc7SMartin Sperl 244eca2ebc7SMartin Sperl static const struct attribute_group *spi_dev_groups[] = { 245eca2ebc7SMartin Sperl &spi_dev_group, 246eca2ebc7SMartin Sperl &spi_device_statistics_group, 247eca2ebc7SMartin Sperl NULL, 248eca2ebc7SMartin Sperl }; 249eca2ebc7SMartin Sperl 2508caab75fSGeert Uytterhoeven static struct attribute *spi_controller_statistics_attrs[] = { 2518caab75fSGeert Uytterhoeven &dev_attr_spi_controller_messages.attr, 2528caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfers.attr, 2538caab75fSGeert Uytterhoeven &dev_attr_spi_controller_errors.attr, 2548caab75fSGeert Uytterhoeven &dev_attr_spi_controller_timedout.attr, 2558caab75fSGeert Uytterhoeven &dev_attr_spi_controller_spi_sync.attr, 2568caab75fSGeert Uytterhoeven &dev_attr_spi_controller_spi_sync_immediate.attr, 2578caab75fSGeert Uytterhoeven &dev_attr_spi_controller_spi_async.attr, 2588caab75fSGeert Uytterhoeven &dev_attr_spi_controller_bytes.attr, 2598caab75fSGeert Uytterhoeven &dev_attr_spi_controller_bytes_rx.attr, 2608caab75fSGeert Uytterhoeven &dev_attr_spi_controller_bytes_tx.attr, 2618caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo0.attr, 2628caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo1.attr, 2638caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo2.attr, 2648caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo3.attr, 2658caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo4.attr, 2668caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo5.attr, 2678caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo6.attr, 2688caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo7.attr, 2698caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo8.attr, 2708caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo9.attr, 2718caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo10.attr, 2728caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo11.attr, 2738caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo12.attr, 2748caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo13.attr, 2758caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo14.attr, 2768caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo15.attr, 2778caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfer_bytes_histo16.attr, 2788caab75fSGeert Uytterhoeven &dev_attr_spi_controller_transfers_split_maxsize.attr, 279eca2ebc7SMartin Sperl NULL, 280eca2ebc7SMartin Sperl }; 281eca2ebc7SMartin Sperl 2828caab75fSGeert Uytterhoeven 
static const struct attribute_group spi_controller_statistics_group = { 283eca2ebc7SMartin Sperl .name = "statistics", 2848caab75fSGeert Uytterhoeven .attrs = spi_controller_statistics_attrs, 285eca2ebc7SMartin Sperl }; 286eca2ebc7SMartin Sperl 287eca2ebc7SMartin Sperl static const struct attribute_group *spi_master_groups[] = { 2888caab75fSGeert Uytterhoeven &spi_controller_statistics_group, 289eca2ebc7SMartin Sperl NULL, 290eca2ebc7SMartin Sperl }; 291eca2ebc7SMartin Sperl 292eca2ebc7SMartin Sperl void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 293eca2ebc7SMartin Sperl struct spi_transfer *xfer, 2948caab75fSGeert Uytterhoeven struct spi_controller *ctlr) 295eca2ebc7SMartin Sperl { 296eca2ebc7SMartin Sperl unsigned long flags; 2976b7bc061SMartin Sperl int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 2986b7bc061SMartin Sperl 2996b7bc061SMartin Sperl if (l2len < 0) 3006b7bc061SMartin Sperl l2len = 0; 301eca2ebc7SMartin Sperl 302eca2ebc7SMartin Sperl spin_lock_irqsave(&stats->lock, flags); 303eca2ebc7SMartin Sperl 304eca2ebc7SMartin Sperl stats->transfers++; 3056b7bc061SMartin Sperl stats->transfer_bytes_histo[l2len]++; 306eca2ebc7SMartin Sperl 307eca2ebc7SMartin Sperl stats->bytes += xfer->len; 308eca2ebc7SMartin Sperl if ((xfer->tx_buf) && 3098caab75fSGeert Uytterhoeven (xfer->tx_buf != ctlr->dummy_tx)) 310eca2ebc7SMartin Sperl stats->bytes_tx += xfer->len; 311eca2ebc7SMartin Sperl if ((xfer->rx_buf) && 3128caab75fSGeert Uytterhoeven (xfer->rx_buf != ctlr->dummy_rx)) 313eca2ebc7SMartin Sperl stats->bytes_rx += xfer->len; 314eca2ebc7SMartin Sperl 315eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stats->lock, flags); 316eca2ebc7SMartin Sperl } 317eca2ebc7SMartin Sperl EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 3188ae12a0dSDavid Brownell 3198ae12a0dSDavid Brownell /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 3208ae12a0dSDavid Brownell * and the sysfs version makes coldplug work too. 
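 *
 * For example, a device whose modalias is "my-sensor" (a hypothetical name)
 * reports MODALIAS=spi:my-sensor in its uevent and typically in
 * /sys/bus/spi/devices/spi<bus>.<cs>/modalias, which is the string that
 * "modprobe $MODALIAS" matches against module alias tables.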
3218ae12a0dSDavid Brownell */ 3228ae12a0dSDavid Brownell 32375368bf6SAnton Vorontsov static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 32475368bf6SAnton Vorontsov const struct spi_device *sdev) 32575368bf6SAnton Vorontsov { 32675368bf6SAnton Vorontsov while (id->name[0]) { 32775368bf6SAnton Vorontsov if (!strcmp(sdev->modalias, id->name)) 32875368bf6SAnton Vorontsov return id; 32975368bf6SAnton Vorontsov id++; 33075368bf6SAnton Vorontsov } 33175368bf6SAnton Vorontsov return NULL; 33275368bf6SAnton Vorontsov } 33375368bf6SAnton Vorontsov 33475368bf6SAnton Vorontsov const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 33575368bf6SAnton Vorontsov { 33675368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 33775368bf6SAnton Vorontsov 33875368bf6SAnton Vorontsov return spi_match_id(sdrv->id_table, sdev); 33975368bf6SAnton Vorontsov } 34075368bf6SAnton Vorontsov EXPORT_SYMBOL_GPL(spi_get_device_id); 34175368bf6SAnton Vorontsov 3428ae12a0dSDavid Brownell static int spi_match_device(struct device *dev, struct device_driver *drv) 3438ae12a0dSDavid Brownell { 3448ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 34575368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(drv); 34675368bf6SAnton Vorontsov 3475039563eSTrent Piepho /* Check override first, and if set, only use the named driver */ 3485039563eSTrent Piepho if (spi->driver_override) 3495039563eSTrent Piepho return strcmp(spi->driver_override, drv->name) == 0; 3505039563eSTrent Piepho 3512b7a32f7SSinan Akman /* Attempt an OF style match */ 3522b7a32f7SSinan Akman if (of_driver_match_device(dev, drv)) 3532b7a32f7SSinan Akman return 1; 3542b7a32f7SSinan Akman 35564bee4d2SMika Westerberg /* Then try ACPI */ 35664bee4d2SMika Westerberg if (acpi_driver_match_device(dev, drv)) 35764bee4d2SMika Westerberg return 1; 35864bee4d2SMika Westerberg 35975368bf6SAnton Vorontsov if (sdrv->id_table) 36075368bf6SAnton Vorontsov return !!spi_match_id(sdrv->id_table, spi); 3618ae12a0dSDavid Brownell 36235f74fcaSKay Sievers return strcmp(spi->modalias, drv->name) == 0; 3638ae12a0dSDavid Brownell } 3648ae12a0dSDavid Brownell 3657eff2e7aSKay Sievers static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 3668ae12a0dSDavid Brownell { 3678ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 3688c4ff6d0SZhang Rui int rc; 3698c4ff6d0SZhang Rui 3708c4ff6d0SZhang Rui rc = acpi_device_uevent_modalias(dev, env); 3718c4ff6d0SZhang Rui if (rc != -ENODEV) 3728c4ff6d0SZhang Rui return rc; 3738ae12a0dSDavid Brownell 3742856670fSAndy Shevchenko return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 3758ae12a0dSDavid Brownell } 3768ae12a0dSDavid Brownell 3779db34ee6SUwe Kleine-König static int spi_probe(struct device *dev) 378b885244eSDavid Brownell { 379b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 38044af7927SJon Hunter struct spi_device *spi = to_spi_device(dev); 38133cf00e5SMika Westerberg int ret; 382b885244eSDavid Brownell 38386be408bSSylwester Nawrocki ret = of_clk_set_defaults(dev->of_node, false); 38486be408bSSylwester Nawrocki if (ret) 38586be408bSSylwester Nawrocki return ret; 38686be408bSSylwester Nawrocki 38744af7927SJon Hunter if (dev->of_node) { 38844af7927SJon Hunter spi->irq = of_irq_get(dev->of_node, 0); 38944af7927SJon Hunter if (spi->irq == -EPROBE_DEFER) 39044af7927SJon Hunter return -EPROBE_DEFER; 39144af7927SJon Hunter if (spi->irq < 
0) 39244af7927SJon Hunter spi->irq = 0; 39344af7927SJon Hunter } 39444af7927SJon Hunter 395676e7c25SUlf Hansson ret = dev_pm_domain_attach(dev, true); 39671f277a7SUlf Hansson if (ret) 39771f277a7SUlf Hansson return ret; 39871f277a7SUlf Hansson 399440408dbSUwe Kleine-König if (sdrv->probe) { 40044af7927SJon Hunter ret = sdrv->probe(spi); 40133cf00e5SMika Westerberg if (ret) 402676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 403440408dbSUwe Kleine-König } 40433cf00e5SMika Westerberg 40533cf00e5SMika Westerberg return ret; 406b885244eSDavid Brownell } 407b885244eSDavid Brownell 4089db34ee6SUwe Kleine-König static int spi_remove(struct device *dev) 409b885244eSDavid Brownell { 410b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 411b885244eSDavid Brownell 4127795d475SUwe Kleine-König if (sdrv->remove) { 4137795d475SUwe Kleine-König int ret; 4147795d475SUwe Kleine-König 415aec35f4eSJean Delvare ret = sdrv->remove(to_spi_device(dev)); 4167795d475SUwe Kleine-König if (ret) 4177795d475SUwe Kleine-König dev_warn(dev, 4187795d475SUwe Kleine-König "Failed to unbind driver (%pe), ignoring\n", 4197795d475SUwe Kleine-König ERR_PTR(ret)); 4207795d475SUwe Kleine-König } 4217795d475SUwe Kleine-König 422676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 42333cf00e5SMika Westerberg 4247795d475SUwe Kleine-König return 0; 425b885244eSDavid Brownell } 426b885244eSDavid Brownell 4279db34ee6SUwe Kleine-König static void spi_shutdown(struct device *dev) 428b885244eSDavid Brownell { 429a6f483b2SMarek Szyprowski if (dev->driver) { 430b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 431b885244eSDavid Brownell 4329db34ee6SUwe Kleine-König if (sdrv->shutdown) 433b885244eSDavid Brownell sdrv->shutdown(to_spi_device(dev)); 434b885244eSDavid Brownell } 435a6f483b2SMarek Szyprowski } 436b885244eSDavid Brownell 4379db34ee6SUwe Kleine-König struct bus_type spi_bus_type = { 4389db34ee6SUwe Kleine-König .name = "spi", 4399db34ee6SUwe Kleine-König .dev_groups = spi_dev_groups, 4409db34ee6SUwe Kleine-König .match = spi_match_device, 4419db34ee6SUwe Kleine-König .uevent = spi_uevent, 4429db34ee6SUwe Kleine-König .probe = spi_probe, 4439db34ee6SUwe Kleine-König .remove = spi_remove, 4449db34ee6SUwe Kleine-König .shutdown = spi_shutdown, 4459db34ee6SUwe Kleine-König }; 4469db34ee6SUwe Kleine-König EXPORT_SYMBOL_GPL(spi_bus_type); 4479db34ee6SUwe Kleine-König 44833e34dc6SDavid Brownell /** 449ca5d2485SAndrew F. Davis * __spi_register_driver - register a SPI driver 45088c9321dSThierry Reding * @owner: owner module of the driver to register 45133e34dc6SDavid Brownell * @sdrv: the driver to register 45233e34dc6SDavid Brownell * Context: can sleep 45397d56dc6SJavier Martinez Canillas * 45497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 45533e34dc6SDavid Brownell */ 456ca5d2485SAndrew F. Davis int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 457b885244eSDavid Brownell { 458ca5d2485SAndrew F. Davis sdrv->driver.owner = owner; 459b885244eSDavid Brownell sdrv->driver.bus = &spi_bus_type; 460b885244eSDavid Brownell return driver_register(&sdrv->driver); 461b885244eSDavid Brownell } 462ca5d2485SAndrew F. 
Davis EXPORT_SYMBOL_GPL(__spi_register_driver); 463b885244eSDavid Brownell 4648ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 4658ae12a0dSDavid Brownell 4668ae12a0dSDavid Brownell /* SPI devices should normally not be created by SPI device drivers; that 4678caab75fSGeert Uytterhoeven * would make them board-specific. Similarly with SPI controller drivers. 4688ae12a0dSDavid Brownell * Device registration normally goes into like arch/.../mach.../board-YYY.c 4698ae12a0dSDavid Brownell * with other readonly (flashable) information about mainboard devices. 4708ae12a0dSDavid Brownell */ 4718ae12a0dSDavid Brownell 4728ae12a0dSDavid Brownell struct boardinfo { 4738ae12a0dSDavid Brownell struct list_head list; 4742b9603a0SFeng Tang struct spi_board_info board_info; 4758ae12a0dSDavid Brownell }; 4768ae12a0dSDavid Brownell 4778ae12a0dSDavid Brownell static LIST_HEAD(board_list); 4788caab75fSGeert Uytterhoeven static LIST_HEAD(spi_controller_list); 4792b9603a0SFeng Tang 4802b9603a0SFeng Tang /* 481be73e323SAndy Shevchenko * Used to protect add/del operation for board_info list and 4828caab75fSGeert Uytterhoeven * spi_controller list, and their matching process 4839b61e302SSuniel Mahesh * also used to protect object of type struct idr 4842b9603a0SFeng Tang */ 48594040828SMatthias Kaehlcke static DEFINE_MUTEX(board_lock); 4868ae12a0dSDavid Brownell 487ddf75be4SLukas Wunner /* 488ddf75be4SLukas Wunner * Prevents addition of devices with same chip select and 489ddf75be4SLukas Wunner * addition of devices below an unregistering controller. 490ddf75be4SLukas Wunner */ 491ddf75be4SLukas Wunner static DEFINE_MUTEX(spi_add_lock); 492ddf75be4SLukas Wunner 493dc87c98eSGrant Likely /** 494dc87c98eSGrant Likely * spi_alloc_device - Allocate a new SPI device 4958caab75fSGeert Uytterhoeven * @ctlr: Controller to which device is connected 496dc87c98eSGrant Likely * Context: can sleep 497dc87c98eSGrant Likely * 498dc87c98eSGrant Likely * Allows a driver to allocate and initialize a spi_device without 499dc87c98eSGrant Likely * registering it immediately. This allows a driver to directly 500dc87c98eSGrant Likely * fill the spi_device with device parameters before calling 501dc87c98eSGrant Likely * spi_add_device() on it. 502dc87c98eSGrant Likely * 503dc87c98eSGrant Likely * Caller is responsible to call spi_add_device() on the returned 5048caab75fSGeert Uytterhoeven * spi_device structure to add it to the SPI controller. If the caller 505dc87c98eSGrant Likely * needs to discard the spi_device without adding it, then it should 506dc87c98eSGrant Likely * call spi_dev_put() on it. 507dc87c98eSGrant Likely * 50897d56dc6SJavier Martinez Canillas * Return: a pointer to the new device, or NULL. 
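 *
 * A minimal usage sketch, mirroring what spi_new_device() below does
 * (the chip select and modalias used here are hypothetical):
 *
 *      struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      strlcpy(spi->modalias, "my-chip", sizeof(spi->modalias));
 *      if (spi_add_device(spi))
 *              spi_dev_put(spi);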
509dc87c98eSGrant Likely */ 5108caab75fSGeert Uytterhoeven struct spi_device *spi_alloc_device(struct spi_controller *ctlr) 511dc87c98eSGrant Likely { 512dc87c98eSGrant Likely struct spi_device *spi; 513dc87c98eSGrant Likely 5148caab75fSGeert Uytterhoeven if (!spi_controller_get(ctlr)) 515dc87c98eSGrant Likely return NULL; 516dc87c98eSGrant Likely 5175fe5f05eSJingoo Han spi = kzalloc(sizeof(*spi), GFP_KERNEL); 518dc87c98eSGrant Likely if (!spi) { 5198caab75fSGeert Uytterhoeven spi_controller_put(ctlr); 520dc87c98eSGrant Likely return NULL; 521dc87c98eSGrant Likely } 522dc87c98eSGrant Likely 5238caab75fSGeert Uytterhoeven spi->master = spi->controller = ctlr; 5248caab75fSGeert Uytterhoeven spi->dev.parent = &ctlr->dev; 525dc87c98eSGrant Likely spi->dev.bus = &spi_bus_type; 526dc87c98eSGrant Likely spi->dev.release = spidev_release; 527446411e1SAndreas Larsson spi->cs_gpio = -ENOENT; 528ea235786SJohn Garry spi->mode = ctlr->buswidth_override_bits; 529eca2ebc7SMartin Sperl 530eca2ebc7SMartin Sperl spin_lock_init(&spi->statistics.lock); 531eca2ebc7SMartin Sperl 532dc87c98eSGrant Likely device_initialize(&spi->dev); 533dc87c98eSGrant Likely return spi; 534dc87c98eSGrant Likely } 535dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_alloc_device); 536dc87c98eSGrant Likely 537e13ac47bSJarkko Nikula static void spi_dev_set_name(struct spi_device *spi) 538e13ac47bSJarkko Nikula { 539e13ac47bSJarkko Nikula struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 540e13ac47bSJarkko Nikula 541e13ac47bSJarkko Nikula if (adev) { 542e13ac47bSJarkko Nikula dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 543e13ac47bSJarkko Nikula return; 544e13ac47bSJarkko Nikula } 545e13ac47bSJarkko Nikula 5468caab75fSGeert Uytterhoeven dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), 547e13ac47bSJarkko Nikula spi->chip_select); 548e13ac47bSJarkko Nikula } 549e13ac47bSJarkko Nikula 550b6fb8d3aSMika Westerberg static int spi_dev_check(struct device *dev, void *data) 551b6fb8d3aSMika Westerberg { 552b6fb8d3aSMika Westerberg struct spi_device *spi = to_spi_device(dev); 553b6fb8d3aSMika Westerberg struct spi_device *new_spi = data; 554b6fb8d3aSMika Westerberg 5558caab75fSGeert Uytterhoeven if (spi->controller == new_spi->controller && 556b6fb8d3aSMika Westerberg spi->chip_select == new_spi->chip_select) 557b6fb8d3aSMika Westerberg return -EBUSY; 558b6fb8d3aSMika Westerberg return 0; 559b6fb8d3aSMika Westerberg } 560b6fb8d3aSMika Westerberg 561dc87c98eSGrant Likely /** 562dc87c98eSGrant Likely * spi_add_device - Add spi_device allocated with spi_alloc_device 563dc87c98eSGrant Likely * @spi: spi_device to register 564dc87c98eSGrant Likely * 565dc87c98eSGrant Likely * Companion function to spi_alloc_device. Devices allocated with 566dc87c98eSGrant Likely * spi_alloc_device can be added onto the spi bus with this function. 567dc87c98eSGrant Likely * 56897d56dc6SJavier Martinez Canillas * Return: 0 on success; negative errno on failure 569dc87c98eSGrant Likely */ 570dc87c98eSGrant Likely int spi_add_device(struct spi_device *spi) 571dc87c98eSGrant Likely { 5728caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 5738caab75fSGeert Uytterhoeven struct device *dev = ctlr->dev.parent; 574dc87c98eSGrant Likely int status; 575dc87c98eSGrant Likely 576dc87c98eSGrant Likely /* Chipselects are numbered 0..max; validate. 
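 * (Valid values are 0..ctlr->num_chipselect - 1; num_chipselect is set
 * by the controller driver before the controller is registered.)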
*/ 5778caab75fSGeert Uytterhoeven if (spi->chip_select >= ctlr->num_chipselect) { 5788caab75fSGeert Uytterhoeven dev_err(dev, "cs%d >= max %d\n", spi->chip_select, 5798caab75fSGeert Uytterhoeven ctlr->num_chipselect); 580dc87c98eSGrant Likely return -EINVAL; 581dc87c98eSGrant Likely } 582dc87c98eSGrant Likely 583dc87c98eSGrant Likely /* Set the bus ID string */ 584e13ac47bSJarkko Nikula spi_dev_set_name(spi); 585e48880e0SDavid Brownell 586e48880e0SDavid Brownell /* We need to make sure there's no other device with this 587e48880e0SDavid Brownell * chipselect **BEFORE** we call setup(), else we'll trash 588e48880e0SDavid Brownell * its configuration. Lock against concurrent add() calls. 589e48880e0SDavid Brownell */ 590e48880e0SDavid Brownell mutex_lock(&spi_add_lock); 591e48880e0SDavid Brownell 592b6fb8d3aSMika Westerberg status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 593b6fb8d3aSMika Westerberg if (status) { 594e48880e0SDavid Brownell dev_err(dev, "chipselect %d already in use\n", 595e48880e0SDavid Brownell spi->chip_select); 596e48880e0SDavid Brownell goto done; 597e48880e0SDavid Brownell } 598e48880e0SDavid Brownell 599ddf75be4SLukas Wunner /* Controller may unregister concurrently */ 600ddf75be4SLukas Wunner if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && 601ddf75be4SLukas Wunner !device_is_registered(&ctlr->dev)) { 602ddf75be4SLukas Wunner status = -ENODEV; 603ddf75be4SLukas Wunner goto done; 604ddf75be4SLukas Wunner } 605ddf75be4SLukas Wunner 606f3186dd8SLinus Walleij /* Descriptors take precedence */ 607f3186dd8SLinus Walleij if (ctlr->cs_gpiods) 608f3186dd8SLinus Walleij spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; 609f3186dd8SLinus Walleij else if (ctlr->cs_gpios) 6108caab75fSGeert Uytterhoeven spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; 61174317984SJean-Christophe PLAGNIOL-VILLARD 612e48880e0SDavid Brownell /* Drivers may modify this initial i/o setup, but will 613e48880e0SDavid Brownell * normally rely on the device being setup. Devices 614e48880e0SDavid Brownell * using SPI_CS_HIGH can't coexist well otherwise... 
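 *
 * spi_setup() below applies the mode, bits_per_word and clock rate filled
 * in above (or by the caller) before the device is added and potentially
 * bound to a driver.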
615e48880e0SDavid Brownell */ 6167d077197SDavid Brownell status = spi_setup(spi); 617dc87c98eSGrant Likely if (status < 0) { 618eb288a1fSLinus Walleij dev_err(dev, "can't setup %s, status %d\n", 619eb288a1fSLinus Walleij dev_name(&spi->dev), status); 620e48880e0SDavid Brownell goto done; 621dc87c98eSGrant Likely } 622dc87c98eSGrant Likely 623e48880e0SDavid Brownell /* Device may be bound to an active driver when this returns */ 624dc87c98eSGrant Likely status = device_add(&spi->dev); 625e48880e0SDavid Brownell if (status < 0) 626eb288a1fSLinus Walleij dev_err(dev, "can't add %s, status %d\n", 627eb288a1fSLinus Walleij dev_name(&spi->dev), status); 628e48880e0SDavid Brownell else 62935f74fcaSKay Sievers dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 630e48880e0SDavid Brownell 631e48880e0SDavid Brownell done: 632e48880e0SDavid Brownell mutex_unlock(&spi_add_lock); 633e48880e0SDavid Brownell return status; 634dc87c98eSGrant Likely } 635dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_add_device); 6368ae12a0dSDavid Brownell 63733e34dc6SDavid Brownell /** 63833e34dc6SDavid Brownell * spi_new_device - instantiate one new SPI device 6398caab75fSGeert Uytterhoeven * @ctlr: Controller to which device is connected 64033e34dc6SDavid Brownell * @chip: Describes the SPI device 64133e34dc6SDavid Brownell * Context: can sleep 64233e34dc6SDavid Brownell * 64333e34dc6SDavid Brownell * On typical mainboards, this is purely internal; and it's not needed 6448ae12a0dSDavid Brownell * after board init creates the hard-wired devices. Some development 6458ae12a0dSDavid Brownell * platforms may not be able to use spi_register_board_info though, and 6468ae12a0dSDavid Brownell * this is exported so that for example a USB or parport based adapter 6478ae12a0dSDavid Brownell * driver could add devices (which it would learn about out-of-band). 648082c8cb4SDavid Brownell * 64997d56dc6SJavier Martinez Canillas * Return: the new device, or NULL. 6508ae12a0dSDavid Brownell */ 6518caab75fSGeert Uytterhoeven struct spi_device *spi_new_device(struct spi_controller *ctlr, 652e9d5a461SAdrian Bunk struct spi_board_info *chip) 6538ae12a0dSDavid Brownell { 6548ae12a0dSDavid Brownell struct spi_device *proxy; 6558ae12a0dSDavid Brownell int status; 6568ae12a0dSDavid Brownell 657082c8cb4SDavid Brownell /* NOTE: caller did any chip->bus_num checks necessary. 658082c8cb4SDavid Brownell * 659082c8cb4SDavid Brownell * Also, unless we change the return value convention to use 660082c8cb4SDavid Brownell * error-or-pointer (not NULL-or-pointer), troubleshootability 661082c8cb4SDavid Brownell * suggests syslogged diagnostics are best here (ugh). 
662082c8cb4SDavid Brownell */ 663082c8cb4SDavid Brownell 6648caab75fSGeert Uytterhoeven proxy = spi_alloc_device(ctlr); 665dc87c98eSGrant Likely if (!proxy) 6668ae12a0dSDavid Brownell return NULL; 6678ae12a0dSDavid Brownell 668102eb975SGrant Likely WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 669102eb975SGrant Likely 6708ae12a0dSDavid Brownell proxy->chip_select = chip->chip_select; 6718ae12a0dSDavid Brownell proxy->max_speed_hz = chip->max_speed_hz; 672980a01c9SDavid Brownell proxy->mode = chip->mode; 6738ae12a0dSDavid Brownell proxy->irq = chip->irq; 674102eb975SGrant Likely strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 6758ae12a0dSDavid Brownell proxy->dev.platform_data = (void *) chip->platform_data; 6768ae12a0dSDavid Brownell proxy->controller_data = chip->controller_data; 6778ae12a0dSDavid Brownell proxy->controller_state = NULL; 6788ae12a0dSDavid Brownell 679826cf175SDmitry Torokhov if (chip->properties) { 680826cf175SDmitry Torokhov status = device_add_properties(&proxy->dev, chip->properties); 681826cf175SDmitry Torokhov if (status) { 6828caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 683826cf175SDmitry Torokhov "failed to add properties to '%s': %d\n", 684826cf175SDmitry Torokhov chip->modalias, status); 685826cf175SDmitry Torokhov goto err_dev_put; 686826cf175SDmitry Torokhov } 6878ae12a0dSDavid Brownell } 688dc87c98eSGrant Likely 689826cf175SDmitry Torokhov status = spi_add_device(proxy); 690826cf175SDmitry Torokhov if (status < 0) 691826cf175SDmitry Torokhov goto err_remove_props; 692826cf175SDmitry Torokhov 693dc87c98eSGrant Likely return proxy; 694826cf175SDmitry Torokhov 695826cf175SDmitry Torokhov err_remove_props: 696826cf175SDmitry Torokhov if (chip->properties) 697826cf175SDmitry Torokhov device_remove_properties(&proxy->dev); 698826cf175SDmitry Torokhov err_dev_put: 699826cf175SDmitry Torokhov spi_dev_put(proxy); 700826cf175SDmitry Torokhov return NULL; 701dc87c98eSGrant Likely } 7028ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_new_device); 7038ae12a0dSDavid Brownell 7043b1884c2SGeert Uytterhoeven /** 7053b1884c2SGeert Uytterhoeven * spi_unregister_device - unregister a single SPI device 7063b1884c2SGeert Uytterhoeven * @spi: spi_device to unregister 7073b1884c2SGeert Uytterhoeven * 7083b1884c2SGeert Uytterhoeven * Start making the passed SPI device vanish. Normally this would be handled 7098caab75fSGeert Uytterhoeven * by spi_unregister_controller(). 
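 * Drivers that created a device directly with spi_new_device() (for
 * example an add-on board or expansion connector driver) use this to
 * undo that registration.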
7103b1884c2SGeert Uytterhoeven */ 7113b1884c2SGeert Uytterhoeven void spi_unregister_device(struct spi_device *spi) 7123b1884c2SGeert Uytterhoeven { 713bd6c1644SGeert Uytterhoeven if (!spi) 714bd6c1644SGeert Uytterhoeven return; 715bd6c1644SGeert Uytterhoeven 7168324147fSJohan Hovold if (spi->dev.of_node) { 717bd6c1644SGeert Uytterhoeven of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 7188324147fSJohan Hovold of_node_put(spi->dev.of_node); 7198324147fSJohan Hovold } 7207f24467fSOctavian Purdila if (ACPI_COMPANION(&spi->dev)) 7217f24467fSOctavian Purdila acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 7223b1884c2SGeert Uytterhoeven device_unregister(&spi->dev); 7233b1884c2SGeert Uytterhoeven } 7243b1884c2SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_device); 7253b1884c2SGeert Uytterhoeven 7268caab75fSGeert Uytterhoeven static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, 7272b9603a0SFeng Tang struct spi_board_info *bi) 7282b9603a0SFeng Tang { 7292b9603a0SFeng Tang struct spi_device *dev; 7302b9603a0SFeng Tang 7318caab75fSGeert Uytterhoeven if (ctlr->bus_num != bi->bus_num) 7322b9603a0SFeng Tang return; 7332b9603a0SFeng Tang 7348caab75fSGeert Uytterhoeven dev = spi_new_device(ctlr, bi); 7352b9603a0SFeng Tang if (!dev) 7368caab75fSGeert Uytterhoeven dev_err(ctlr->dev.parent, "can't create new device for %s\n", 7372b9603a0SFeng Tang bi->modalias); 7382b9603a0SFeng Tang } 7392b9603a0SFeng Tang 74033e34dc6SDavid Brownell /** 74133e34dc6SDavid Brownell * spi_register_board_info - register SPI devices for a given board 74233e34dc6SDavid Brownell * @info: array of chip descriptors 74333e34dc6SDavid Brownell * @n: how many descriptors are provided 74433e34dc6SDavid Brownell * Context: can sleep 74533e34dc6SDavid Brownell * 7468ae12a0dSDavid Brownell * Board-specific early init code calls this (probably during arch_initcall) 7478ae12a0dSDavid Brownell * with segments of the SPI device table. Any device nodes are created later, 7488ae12a0dSDavid Brownell * after the relevant parent SPI controller (bus_num) is defined. We keep 7498ae12a0dSDavid Brownell * this table of devices forever, so that reloading a controller driver will 7508ae12a0dSDavid Brownell * not make Linux forget about these hard-wired devices. 7518ae12a0dSDavid Brownell * 7528ae12a0dSDavid Brownell * Other code can also call this, e.g. a particular add-on board might provide 7538ae12a0dSDavid Brownell * SPI devices through its expansion connector, so code initializing that board 7548ae12a0dSDavid Brownell * would naturally declare its SPI devices. 7558ae12a0dSDavid Brownell * 7568ae12a0dSDavid Brownell * The board info passed can safely be __initdata ... but be careful of 7578ae12a0dSDavid Brownell * any embedded pointers (platform_data, etc), they're copied as-is. 758826cf175SDmitry Torokhov * Device properties are deep-copied though. 75997d56dc6SJavier Martinez Canillas * 76097d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
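 *
 * A minimal usage sketch from board init code (the modalias, bus number
 * and chip select below are hypothetical):
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias     = "my-sensor",
 *                      .max_speed_hz = 1000000,
 *                      .bus_num      = 0,
 *                      .chip_select  = 1,
 *                      .mode         = SPI_MODE_0,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));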
7618ae12a0dSDavid Brownell */ 762fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n) 7638ae12a0dSDavid Brownell { 7648ae12a0dSDavid Brownell struct boardinfo *bi; 7652b9603a0SFeng Tang int i; 7668ae12a0dSDavid Brownell 767c7908a37SXiubo Li if (!n) 768f974cf57SDmitry Torokhov return 0; 769c7908a37SXiubo Li 770f9bdb7fdSMarkus Elfring bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 7718ae12a0dSDavid Brownell if (!bi) 7728ae12a0dSDavid Brownell return -ENOMEM; 7738ae12a0dSDavid Brownell 7742b9603a0SFeng Tang for (i = 0; i < n; i++, bi++, info++) { 7758caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 7762b9603a0SFeng Tang 7772b9603a0SFeng Tang memcpy(&bi->board_info, info, sizeof(*info)); 778826cf175SDmitry Torokhov if (info->properties) { 779826cf175SDmitry Torokhov bi->board_info.properties = 780826cf175SDmitry Torokhov property_entries_dup(info->properties); 781826cf175SDmitry Torokhov if (IS_ERR(bi->board_info.properties)) 782826cf175SDmitry Torokhov return PTR_ERR(bi->board_info.properties); 783826cf175SDmitry Torokhov } 784826cf175SDmitry Torokhov 78594040828SMatthias Kaehlcke mutex_lock(&board_lock); 7868ae12a0dSDavid Brownell list_add_tail(&bi->list, &board_list); 7878caab75fSGeert Uytterhoeven list_for_each_entry(ctlr, &spi_controller_list, list) 7888caab75fSGeert Uytterhoeven spi_match_controller_to_boardinfo(ctlr, 7898caab75fSGeert Uytterhoeven &bi->board_info); 79094040828SMatthias Kaehlcke mutex_unlock(&board_lock); 7912b9603a0SFeng Tang } 7922b9603a0SFeng Tang 7938ae12a0dSDavid Brownell return 0; 7948ae12a0dSDavid Brownell } 7958ae12a0dSDavid Brownell 7968ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 7978ae12a0dSDavid Brownell 798b158935fSMark Brown static void spi_set_cs(struct spi_device *spi, bool enable) 799b158935fSMark Brown { 80025093bdeSAlexandru Ardelean bool enable1 = enable; 80125093bdeSAlexandru Ardelean 802d40f0b6fSDouglas Anderson /* 803d40f0b6fSDouglas Anderson * Avoid calling into the driver (or doing delays) if the chip select 804d40f0b6fSDouglas Anderson * isn't actually changing from the last time this was called. 
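 * The SPI_CS_HIGH bit of the mode is part of the comparison because a
 * change in chip select polarity must reach the driver even when the
 * logical enable state is unchanged.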
805d40f0b6fSDouglas Anderson */ 806d40f0b6fSDouglas Anderson if ((spi->controller->last_cs_enable == enable) && 807d40f0b6fSDouglas Anderson (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) 808d40f0b6fSDouglas Anderson return; 809d40f0b6fSDouglas Anderson 810d40f0b6fSDouglas Anderson spi->controller->last_cs_enable = enable; 811d40f0b6fSDouglas Anderson spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; 812d40f0b6fSDouglas Anderson 8130486d9f9Sleilk.liu if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) || 8140486d9f9Sleilk.liu !spi->controller->set_cs_timing) { 81525093bdeSAlexandru Ardelean if (enable1) 81625093bdeSAlexandru Ardelean spi_delay_exec(&spi->controller->cs_setup, NULL); 81725093bdeSAlexandru Ardelean else 81825093bdeSAlexandru Ardelean spi_delay_exec(&spi->controller->cs_hold, NULL); 81925093bdeSAlexandru Ardelean } 82025093bdeSAlexandru Ardelean 821b158935fSMark Brown if (spi->mode & SPI_CS_HIGH) 822b158935fSMark Brown enable = !enable; 823b158935fSMark Brown 824f3186dd8SLinus Walleij if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { 825f3186dd8SLinus Walleij if (!(spi->mode & SPI_NO_CS)) { 826f3186dd8SLinus Walleij if (spi->cs_gpiod) 827766c6b63SSven Van Asbroeck /* polarity handled by gpiolib */ 82828f7604fSFelix Fietkau gpiod_set_value_cansleep(spi->cs_gpiod, 829766c6b63SSven Van Asbroeck enable1); 830f3186dd8SLinus Walleij else 831766c6b63SSven Van Asbroeck /* 832766c6b63SSven Van Asbroeck * invert the enable line, as active low is 833766c6b63SSven Van Asbroeck * default for SPI. 834766c6b63SSven Van Asbroeck */ 83528f7604fSFelix Fietkau gpio_set_value_cansleep(spi->cs_gpio, !enable); 836f3186dd8SLinus Walleij } 8378eee6b9dSThor Thayer /* Some SPI masters need both GPIO CS & slave_select */ 8388caab75fSGeert Uytterhoeven if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && 8398caab75fSGeert Uytterhoeven spi->controller->set_cs) 8408caab75fSGeert Uytterhoeven spi->controller->set_cs(spi, !enable); 8418caab75fSGeert Uytterhoeven } else if (spi->controller->set_cs) { 8428caab75fSGeert Uytterhoeven spi->controller->set_cs(spi, !enable); 8438eee6b9dSThor Thayer } 84425093bdeSAlexandru Ardelean 8450486d9f9Sleilk.liu if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) || 8460486d9f9Sleilk.liu !spi->controller->set_cs_timing) { 84725093bdeSAlexandru Ardelean if (!enable1) 84825093bdeSAlexandru Ardelean spi_delay_exec(&spi->controller->cs_inactive, NULL); 84925093bdeSAlexandru Ardelean } 850b158935fSMark Brown } 851b158935fSMark Brown 8522de440f5SGeert Uytterhoeven #ifdef CONFIG_HAS_DMA 85346336966SBoris Brezillon int spi_map_buf(struct spi_controller *ctlr, struct device *dev, 8546ad45a27SMark Brown struct sg_table *sgt, void *buf, size_t len, 8556ad45a27SMark Brown enum dma_data_direction dir) 8566ad45a27SMark Brown { 8576ad45a27SMark Brown const bool vmalloced_buf = is_vmalloc_addr(buf); 858df88e91bSAndy Shevchenko unsigned int max_seg_size = dma_get_max_seg_size(dev); 859b1b8153cSVignesh R #ifdef CONFIG_HIGHMEM 860b1b8153cSVignesh R const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 861b1b8153cSVignesh R (unsigned long)buf < (PKMAP_BASE + 862b1b8153cSVignesh R (LAST_PKMAP * PAGE_SIZE))); 863b1b8153cSVignesh R #else 864b1b8153cSVignesh R const bool kmap_buf = false; 865b1b8153cSVignesh R #endif 86665598c13SAndrew Gabbasov int desc_len; 86765598c13SAndrew Gabbasov int sgs; 8686ad45a27SMark Brown struct page *vm_page; 8698dd4a016SJuan Gutierrez struct scatterlist *sg; 8706ad45a27SMark Brown void *sg_buf; 8716ad45a27SMark Brown size_t min; 
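	/*
	 * The buffer is split into scatterlist entries of at most desc_len
	 * bytes; vmalloc'ed and highmem buffers are mapped page by page
	 * because their pages need not be physically contiguous.
	 */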
8726ad45a27SMark Brown int i, ret; 8736ad45a27SMark Brown 874b1b8153cSVignesh R if (vmalloced_buf || kmap_buf) { 875df88e91bSAndy Shevchenko desc_len = min_t(int, max_seg_size, PAGE_SIZE); 87665598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 8770569a88fSVignesh R } else if (virt_addr_valid(buf)) { 8788caab75fSGeert Uytterhoeven desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); 87965598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len, desc_len); 8800569a88fSVignesh R } else { 8810569a88fSVignesh R return -EINVAL; 88265598c13SAndrew Gabbasov } 88365598c13SAndrew Gabbasov 8846ad45a27SMark Brown ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 8856ad45a27SMark Brown if (ret != 0) 8866ad45a27SMark Brown return ret; 8876ad45a27SMark Brown 8888dd4a016SJuan Gutierrez sg = &sgt->sgl[0]; 8896ad45a27SMark Brown for (i = 0; i < sgs; i++) { 8906ad45a27SMark Brown 891b1b8153cSVignesh R if (vmalloced_buf || kmap_buf) { 892ce99319aSMaxime Chevallier /* 893ce99319aSMaxime Chevallier * Next scatterlist entry size is the minimum between 894ce99319aSMaxime Chevallier * the desc_len and the remaining buffer length that 895ce99319aSMaxime Chevallier * fits in a page. 896ce99319aSMaxime Chevallier */ 897ce99319aSMaxime Chevallier min = min_t(size_t, desc_len, 898ce99319aSMaxime Chevallier min_t(size_t, len, 899ce99319aSMaxime Chevallier PAGE_SIZE - offset_in_page(buf))); 900b1b8153cSVignesh R if (vmalloced_buf) 9016ad45a27SMark Brown vm_page = vmalloc_to_page(buf); 902b1b8153cSVignesh R else 903b1b8153cSVignesh R vm_page = kmap_to_page(buf); 9046ad45a27SMark Brown if (!vm_page) { 9056ad45a27SMark Brown sg_free_table(sgt); 9066ad45a27SMark Brown return -ENOMEM; 9076ad45a27SMark Brown } 9088dd4a016SJuan Gutierrez sg_set_page(sg, vm_page, 909c1aefbddSCharles Keepax min, offset_in_page(buf)); 9106ad45a27SMark Brown } else { 91165598c13SAndrew Gabbasov min = min_t(size_t, len, desc_len); 9126ad45a27SMark Brown sg_buf = buf; 9138dd4a016SJuan Gutierrez sg_set_buf(sg, sg_buf, min); 9146ad45a27SMark Brown } 9156ad45a27SMark Brown 9166ad45a27SMark Brown buf += min; 9176ad45a27SMark Brown len -= min; 9188dd4a016SJuan Gutierrez sg = sg_next(sg); 9196ad45a27SMark Brown } 9206ad45a27SMark Brown 9216ad45a27SMark Brown ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 92289e4b66aSGeert Uytterhoeven if (!ret) 92389e4b66aSGeert Uytterhoeven ret = -ENOMEM; 9246ad45a27SMark Brown if (ret < 0) { 9256ad45a27SMark Brown sg_free_table(sgt); 9266ad45a27SMark Brown return ret; 9276ad45a27SMark Brown } 9286ad45a27SMark Brown 9296ad45a27SMark Brown sgt->nents = ret; 9306ad45a27SMark Brown 9316ad45a27SMark Brown return 0; 9326ad45a27SMark Brown } 9336ad45a27SMark Brown 93446336966SBoris Brezillon void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, 9356ad45a27SMark Brown struct sg_table *sgt, enum dma_data_direction dir) 9366ad45a27SMark Brown { 9376ad45a27SMark Brown if (sgt->orig_nents) { 9386ad45a27SMark Brown dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 9396ad45a27SMark Brown sg_free_table(sgt); 9406ad45a27SMark Brown } 9416ad45a27SMark Brown } 9426ad45a27SMark Brown 9438caab75fSGeert Uytterhoeven static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 94499adef31SMark Brown { 94599adef31SMark Brown struct device *tx_dev, *rx_dev; 94699adef31SMark Brown struct spi_transfer *xfer; 9476ad45a27SMark Brown int ret; 9483a2eba9bSMark Brown 9498caab75fSGeert Uytterhoeven if (!ctlr->can_dma) 95099adef31SMark Brown return 0; 95199adef31SMark Brown 9528caab75fSGeert 
Uytterhoeven if (ctlr->dma_tx) 9538caab75fSGeert Uytterhoeven tx_dev = ctlr->dma_tx->device->dev; 954c37f45b5SLeilk Liu else 9558caab75fSGeert Uytterhoeven tx_dev = ctlr->dev.parent; 956c37f45b5SLeilk Liu 9578caab75fSGeert Uytterhoeven if (ctlr->dma_rx) 9588caab75fSGeert Uytterhoeven rx_dev = ctlr->dma_rx->device->dev; 959c37f45b5SLeilk Liu else 9608caab75fSGeert Uytterhoeven rx_dev = ctlr->dev.parent; 96199adef31SMark Brown 96299adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 9638caab75fSGeert Uytterhoeven if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 96499adef31SMark Brown continue; 96599adef31SMark Brown 96699adef31SMark Brown if (xfer->tx_buf != NULL) { 9678caab75fSGeert Uytterhoeven ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, 9686ad45a27SMark Brown (void *)xfer->tx_buf, xfer->len, 96999adef31SMark Brown DMA_TO_DEVICE); 9706ad45a27SMark Brown if (ret != 0) 9716ad45a27SMark Brown return ret; 97299adef31SMark Brown } 97399adef31SMark Brown 97499adef31SMark Brown if (xfer->rx_buf != NULL) { 9758caab75fSGeert Uytterhoeven ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, 97699adef31SMark Brown xfer->rx_buf, xfer->len, 97799adef31SMark Brown DMA_FROM_DEVICE); 9786ad45a27SMark Brown if (ret != 0) { 9798caab75fSGeert Uytterhoeven spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, 9806ad45a27SMark Brown DMA_TO_DEVICE); 9816ad45a27SMark Brown return ret; 98299adef31SMark Brown } 98399adef31SMark Brown } 98499adef31SMark Brown } 98599adef31SMark Brown 9868caab75fSGeert Uytterhoeven ctlr->cur_msg_mapped = true; 98799adef31SMark Brown 98899adef31SMark Brown return 0; 98999adef31SMark Brown } 99099adef31SMark Brown 9918caab75fSGeert Uytterhoeven static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) 99299adef31SMark Brown { 99399adef31SMark Brown struct spi_transfer *xfer; 99499adef31SMark Brown struct device *tx_dev, *rx_dev; 99599adef31SMark Brown 9968caab75fSGeert Uytterhoeven if (!ctlr->cur_msg_mapped || !ctlr->can_dma) 99799adef31SMark Brown return 0; 99899adef31SMark Brown 9998caab75fSGeert Uytterhoeven if (ctlr->dma_tx) 10008caab75fSGeert Uytterhoeven tx_dev = ctlr->dma_tx->device->dev; 1001c37f45b5SLeilk Liu else 10028caab75fSGeert Uytterhoeven tx_dev = ctlr->dev.parent; 1003c37f45b5SLeilk Liu 10048caab75fSGeert Uytterhoeven if (ctlr->dma_rx) 10058caab75fSGeert Uytterhoeven rx_dev = ctlr->dma_rx->device->dev; 1006c37f45b5SLeilk Liu else 10078caab75fSGeert Uytterhoeven rx_dev = ctlr->dev.parent; 100899adef31SMark Brown 100999adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 10108caab75fSGeert Uytterhoeven if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 101199adef31SMark Brown continue; 101299adef31SMark Brown 10138caab75fSGeert Uytterhoeven spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 10148caab75fSGeert Uytterhoeven spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 101599adef31SMark Brown } 101699adef31SMark Brown 1017809b1b04SRobin Gong ctlr->cur_msg_mapped = false; 1018809b1b04SRobin Gong 101999adef31SMark Brown return 0; 102099adef31SMark Brown } 10212de440f5SGeert Uytterhoeven #else /* !CONFIG_HAS_DMA */ 10228caab75fSGeert Uytterhoeven static inline int __spi_map_msg(struct spi_controller *ctlr, 10232de440f5SGeert Uytterhoeven struct spi_message *msg) 10242de440f5SGeert Uytterhoeven { 10252de440f5SGeert Uytterhoeven return 0; 10262de440f5SGeert Uytterhoeven } 10272de440f5SGeert Uytterhoeven 10288caab75fSGeert Uytterhoeven static inline int __spi_unmap_msg(struct spi_controller *ctlr, 10292de440f5SGeert 
Uytterhoeven struct spi_message *msg) 10302de440f5SGeert Uytterhoeven { 10312de440f5SGeert Uytterhoeven return 0; 10322de440f5SGeert Uytterhoeven } 10332de440f5SGeert Uytterhoeven #endif /* !CONFIG_HAS_DMA */ 10342de440f5SGeert Uytterhoeven 10358caab75fSGeert Uytterhoeven static inline int spi_unmap_msg(struct spi_controller *ctlr, 10364b786458SMartin Sperl struct spi_message *msg) 10374b786458SMartin Sperl { 10384b786458SMartin Sperl struct spi_transfer *xfer; 10394b786458SMartin Sperl 10404b786458SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) { 10414b786458SMartin Sperl /* 10424b786458SMartin Sperl * Restore the original value of tx_buf or rx_buf if they are 10434b786458SMartin Sperl * NULL. 10444b786458SMartin Sperl */ 10458caab75fSGeert Uytterhoeven if (xfer->tx_buf == ctlr->dummy_tx) 10464b786458SMartin Sperl xfer->tx_buf = NULL; 10478caab75fSGeert Uytterhoeven if (xfer->rx_buf == ctlr->dummy_rx) 10484b786458SMartin Sperl xfer->rx_buf = NULL; 10494b786458SMartin Sperl } 10504b786458SMartin Sperl 10518caab75fSGeert Uytterhoeven return __spi_unmap_msg(ctlr, msg); 10524b786458SMartin Sperl } 10534b786458SMartin Sperl 10548caab75fSGeert Uytterhoeven static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 10552de440f5SGeert Uytterhoeven { 10562de440f5SGeert Uytterhoeven struct spi_transfer *xfer; 10572de440f5SGeert Uytterhoeven void *tmp; 10582de440f5SGeert Uytterhoeven unsigned int max_tx, max_rx; 10592de440f5SGeert Uytterhoeven 1060aee67fe8Sdillon min if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) 1061aee67fe8Sdillon min && !(msg->spi->mode & SPI_3WIRE)) { 10622de440f5SGeert Uytterhoeven max_tx = 0; 10632de440f5SGeert Uytterhoeven max_rx = 0; 10642de440f5SGeert Uytterhoeven 10652de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers, transfer_list) { 10668caab75fSGeert Uytterhoeven if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && 10672de440f5SGeert Uytterhoeven !xfer->tx_buf) 10682de440f5SGeert Uytterhoeven max_tx = max(xfer->len, max_tx); 10698caab75fSGeert Uytterhoeven if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && 10702de440f5SGeert Uytterhoeven !xfer->rx_buf) 10712de440f5SGeert Uytterhoeven max_rx = max(xfer->len, max_rx); 10722de440f5SGeert Uytterhoeven } 10732de440f5SGeert Uytterhoeven 10742de440f5SGeert Uytterhoeven if (max_tx) { 10758caab75fSGeert Uytterhoeven tmp = krealloc(ctlr->dummy_tx, max_tx, 10762de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA); 10772de440f5SGeert Uytterhoeven if (!tmp) 10782de440f5SGeert Uytterhoeven return -ENOMEM; 10798caab75fSGeert Uytterhoeven ctlr->dummy_tx = tmp; 10802de440f5SGeert Uytterhoeven memset(tmp, 0, max_tx); 10812de440f5SGeert Uytterhoeven } 10822de440f5SGeert Uytterhoeven 10832de440f5SGeert Uytterhoeven if (max_rx) { 10848caab75fSGeert Uytterhoeven tmp = krealloc(ctlr->dummy_rx, max_rx, 10852de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA); 10862de440f5SGeert Uytterhoeven if (!tmp) 10872de440f5SGeert Uytterhoeven return -ENOMEM; 10888caab75fSGeert Uytterhoeven ctlr->dummy_rx = tmp; 10892de440f5SGeert Uytterhoeven } 10902de440f5SGeert Uytterhoeven 10912de440f5SGeert Uytterhoeven if (max_tx || max_rx) { 10922de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers, 10932de440f5SGeert Uytterhoeven transfer_list) { 10945442dcaaSChris Lesiak if (!xfer->len) 10955442dcaaSChris Lesiak continue; 10962de440f5SGeert Uytterhoeven if (!xfer->tx_buf) 10978caab75fSGeert Uytterhoeven xfer->tx_buf = ctlr->dummy_tx; 10982de440f5SGeert Uytterhoeven if (!xfer->rx_buf) 
10998caab75fSGeert Uytterhoeven xfer->rx_buf = ctlr->dummy_rx; 11002de440f5SGeert Uytterhoeven } 11012de440f5SGeert Uytterhoeven } 11022de440f5SGeert Uytterhoeven } 11032de440f5SGeert Uytterhoeven 11048caab75fSGeert Uytterhoeven return __spi_map_msg(ctlr, msg); 11052de440f5SGeert Uytterhoeven } 110699adef31SMark Brown 1107810923f3SLubomir Rintel static int spi_transfer_wait(struct spi_controller *ctlr, 1108810923f3SLubomir Rintel struct spi_message *msg, 1109810923f3SLubomir Rintel struct spi_transfer *xfer) 1110810923f3SLubomir Rintel { 1111810923f3SLubomir Rintel struct spi_statistics *statm = &ctlr->statistics; 1112810923f3SLubomir Rintel struct spi_statistics *stats = &msg->spi->statistics; 11136170d077SXu Yilun u32 speed_hz = xfer->speed_hz; 111449686df5SColin Ian King unsigned long long ms; 1115810923f3SLubomir Rintel 1116810923f3SLubomir Rintel if (spi_controller_is_slave(ctlr)) { 1117810923f3SLubomir Rintel if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { 1118810923f3SLubomir Rintel dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); 1119810923f3SLubomir Rintel return -EINTR; 1120810923f3SLubomir Rintel } 1121810923f3SLubomir Rintel } else { 11226170d077SXu Yilun if (!speed_hz) 11236170d077SXu Yilun speed_hz = 100000; 11246170d077SXu Yilun 1125810923f3SLubomir Rintel ms = 8LL * 1000LL * xfer->len; 11266170d077SXu Yilun do_div(ms, speed_hz); 1127810923f3SLubomir Rintel ms += ms + 200; /* some tolerance */ 1128810923f3SLubomir Rintel 1129810923f3SLubomir Rintel if (ms > UINT_MAX) 1130810923f3SLubomir Rintel ms = UINT_MAX; 1131810923f3SLubomir Rintel 1132810923f3SLubomir Rintel ms = wait_for_completion_timeout(&ctlr->xfer_completion, 1133810923f3SLubomir Rintel msecs_to_jiffies(ms)); 1134810923f3SLubomir Rintel 1135810923f3SLubomir Rintel if (ms == 0) { 1136810923f3SLubomir Rintel SPI_STATISTICS_INCREMENT_FIELD(statm, timedout); 1137810923f3SLubomir Rintel SPI_STATISTICS_INCREMENT_FIELD(stats, timedout); 1138810923f3SLubomir Rintel dev_err(&msg->spi->dev, 1139810923f3SLubomir Rintel "SPI transfer timed out\n"); 1140810923f3SLubomir Rintel return -ETIMEDOUT; 1141810923f3SLubomir Rintel } 1142810923f3SLubomir Rintel } 1143810923f3SLubomir Rintel 1144810923f3SLubomir Rintel return 0; 1145810923f3SLubomir Rintel } 1146810923f3SLubomir Rintel 11470ff2de8bSMartin Sperl static void _spi_transfer_delay_ns(u32 ns) 11480ff2de8bSMartin Sperl { 11490ff2de8bSMartin Sperl if (!ns) 11500ff2de8bSMartin Sperl return; 11510ff2de8bSMartin Sperl if (ns <= 1000) { 11520ff2de8bSMartin Sperl ndelay(ns); 11530ff2de8bSMartin Sperl } else { 11540ff2de8bSMartin Sperl u32 us = DIV_ROUND_UP(ns, 1000); 11550ff2de8bSMartin Sperl 11560ff2de8bSMartin Sperl if (us <= 10) 11570ff2de8bSMartin Sperl udelay(us); 11580ff2de8bSMartin Sperl else 11590ff2de8bSMartin Sperl usleep_range(us, us + DIV_ROUND_UP(us, 10)); 11600ff2de8bSMartin Sperl } 11610ff2de8bSMartin Sperl } 11620ff2de8bSMartin Sperl 11633984d39bSAlexandru Ardelean int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) 11640ff2de8bSMartin Sperl { 1165b2c98153SAlexandru Ardelean u32 delay = _delay->value; 1166b2c98153SAlexandru Ardelean u32 unit = _delay->unit; 1167d5864e5bSMartin Sperl u32 hz; 11680ff2de8bSMartin Sperl 1169b2c98153SAlexandru Ardelean if (!delay) 1170b2c98153SAlexandru Ardelean return 0; 11710ff2de8bSMartin Sperl 11720ff2de8bSMartin Sperl switch (unit) { 11730ff2de8bSMartin Sperl case SPI_DELAY_UNIT_USECS: 11740ff2de8bSMartin Sperl delay *= 1000; 11750ff2de8bSMartin Sperl break; 11760ff2de8bSMartin Sperl case 
SPI_DELAY_UNIT_NSECS: /* nothing to do here */
11770ff2de8bSMartin Sperl break;
1178d5864e5bSMartin Sperl case SPI_DELAY_UNIT_SCK:
1179b2c98153SAlexandru Ardelean /* clock cycles need to be obtained from spi_transfer */
1180b2c98153SAlexandru Ardelean if (!xfer)
1181b2c98153SAlexandru Ardelean return -EINVAL;
1182d5864e5bSMartin Sperl /* if there is no effective speed known, then approximate
1183d5864e5bSMartin Sperl * by underestimating with half the requested hz
1184d5864e5bSMartin Sperl */
1185d5864e5bSMartin Sperl hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1186b2c98153SAlexandru Ardelean if (!hz)
1187b2c98153SAlexandru Ardelean return -EINVAL;
1188d5864e5bSMartin Sperl delay *= DIV_ROUND_UP(1000000000, hz);
1189d5864e5bSMartin Sperl break;
11900ff2de8bSMartin Sperl default:
1191b2c98153SAlexandru Ardelean return -EINVAL;
1192b2c98153SAlexandru Ardelean }
1193b2c98153SAlexandru Ardelean 
1194b2c98153SAlexandru Ardelean return delay;
1195b2c98153SAlexandru Ardelean }
11963984d39bSAlexandru Ardelean EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1197b2c98153SAlexandru Ardelean 
1198b2c98153SAlexandru Ardelean int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1199b2c98153SAlexandru Ardelean {
1200b2c98153SAlexandru Ardelean int delay;
1201b2c98153SAlexandru Ardelean 
12028fede89fSMark Brown might_sleep();
12038fede89fSMark Brown 
1204b2c98153SAlexandru Ardelean if (!_delay)
1205b2c98153SAlexandru Ardelean return -EINVAL;
1206b2c98153SAlexandru Ardelean 
12073984d39bSAlexandru Ardelean delay = spi_delay_to_ns(_delay, xfer);
1208b2c98153SAlexandru Ardelean if (delay < 0)
1209b2c98153SAlexandru Ardelean return delay;
1210b2c98153SAlexandru Ardelean 
1211b2c98153SAlexandru Ardelean _spi_transfer_delay_ns(delay);
1212b2c98153SAlexandru Ardelean 
1213b2c98153SAlexandru Ardelean return 0;
1214b2c98153SAlexandru Ardelean }
1215b2c98153SAlexandru Ardelean EXPORT_SYMBOL_GPL(spi_delay_exec);
1216b2c98153SAlexandru Ardelean 
12170ff2de8bSMartin Sperl static void _spi_transfer_cs_change_delay(struct spi_message *msg,
12180ff2de8bSMartin Sperl struct spi_transfer *xfer)
12190ff2de8bSMartin Sperl {
1220329f0dacSAlexandru Ardelean u32 delay = xfer->cs_change_delay.value;
1221329f0dacSAlexandru Ardelean u32 unit = xfer->cs_change_delay.unit;
1222329f0dacSAlexandru Ardelean int ret;
12230ff2de8bSMartin Sperl 
12240ff2de8bSMartin Sperl /* return early on "fast" mode - for everything but USECS */
12256b3f236aSAlexandru Ardelean if (!delay) {
12266b3f236aSAlexandru Ardelean if (unit == SPI_DELAY_UNIT_USECS)
12276b3f236aSAlexandru Ardelean _spi_transfer_delay_ns(10000);
12280ff2de8bSMartin Sperl return;
12296b3f236aSAlexandru Ardelean }
12300ff2de8bSMartin Sperl 
1231329f0dacSAlexandru Ardelean ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1232329f0dacSAlexandru Ardelean if (ret) {
12330ff2de8bSMartin Sperl dev_err_once(&msg->spi->dev,
12340ff2de8bSMartin Sperl "Use of unsupported delay unit %i, using default of 10us\n",
1235329f0dacSAlexandru Ardelean unit);
1236329f0dacSAlexandru Ardelean _spi_transfer_delay_ns(10000);
12370ff2de8bSMartin Sperl }
12380ff2de8bSMartin Sperl }
12390ff2de8bSMartin Sperl 
1240b158935fSMark Brown /*
1241b158935fSMark Brown * spi_transfer_one_message - Default implementation of transfer_one_message()
1242b158935fSMark Brown *
1243b158935fSMark Brown * This is a standard implementation of transfer_one_message() for
12448ba811a7SMoritz Fischer * drivers which implement a transfer_one() operation.
It provides 1245b158935fSMark Brown * standard handling of delays and chip select management. 1246b158935fSMark Brown */ 12478caab75fSGeert Uytterhoeven static int spi_transfer_one_message(struct spi_controller *ctlr, 1248b158935fSMark Brown struct spi_message *msg) 1249b158935fSMark Brown { 1250b158935fSMark Brown struct spi_transfer *xfer; 1251b158935fSMark Brown bool keep_cs = false; 1252b158935fSMark Brown int ret = 0; 12538caab75fSGeert Uytterhoeven struct spi_statistics *statm = &ctlr->statistics; 1254eca2ebc7SMartin Sperl struct spi_statistics *stats = &msg->spi->statistics; 1255b158935fSMark Brown 1256b158935fSMark Brown spi_set_cs(msg->spi, true); 1257b158935fSMark Brown 1258eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 1259eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 1260eca2ebc7SMartin Sperl 1261b158935fSMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1262b158935fSMark Brown trace_spi_transfer_start(msg, xfer); 1263b158935fSMark Brown 12648caab75fSGeert Uytterhoeven spi_statistics_add_transfer_stats(statm, xfer, ctlr); 12658caab75fSGeert Uytterhoeven spi_statistics_add_transfer_stats(stats, xfer, ctlr); 1266eca2ebc7SMartin Sperl 1267b42faeeeSVladimir Oltean if (!ctlr->ptp_sts_supported) { 1268b42faeeeSVladimir Oltean xfer->ptp_sts_word_pre = 0; 1269b42faeeeSVladimir Oltean ptp_read_system_prets(xfer->ptp_sts); 1270b42faeeeSVladimir Oltean } 1271b42faeeeSVladimir Oltean 1272b3063203SNicolas Saenz Julienne if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) { 12738caab75fSGeert Uytterhoeven reinit_completion(&ctlr->xfer_completion); 1274b158935fSMark Brown 1275809b1b04SRobin Gong fallback_pio: 12768caab75fSGeert Uytterhoeven ret = ctlr->transfer_one(ctlr, msg->spi, xfer); 1277b158935fSMark Brown if (ret < 0) { 1278809b1b04SRobin Gong if (ctlr->cur_msg_mapped && 1279809b1b04SRobin Gong (xfer->error & SPI_TRANS_FAIL_NO_START)) { 1280809b1b04SRobin Gong __spi_unmap_msg(ctlr, msg); 1281809b1b04SRobin Gong ctlr->fallback = true; 1282809b1b04SRobin Gong xfer->error &= ~SPI_TRANS_FAIL_NO_START; 1283809b1b04SRobin Gong goto fallback_pio; 1284809b1b04SRobin Gong } 1285809b1b04SRobin Gong 1286eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, 1287eca2ebc7SMartin Sperl errors); 1288eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, 1289eca2ebc7SMartin Sperl errors); 1290b158935fSMark Brown dev_err(&msg->spi->dev, 1291b158935fSMark Brown "SPI transfer failed: %d\n", ret); 1292b158935fSMark Brown goto out; 1293b158935fSMark Brown } 1294b158935fSMark Brown 1295d57e7960SMark Brown if (ret > 0) { 1296810923f3SLubomir Rintel ret = spi_transfer_wait(ctlr, msg, xfer); 1297810923f3SLubomir Rintel if (ret < 0) 1298810923f3SLubomir Rintel msg->status = ret; 1299d57e7960SMark Brown } 130038ec10f6SMark Brown } else { 130138ec10f6SMark Brown if (xfer->len) 130238ec10f6SMark Brown dev_err(&msg->spi->dev, 130338ec10f6SMark Brown "Bufferless transfer has length %u\n", 130438ec10f6SMark Brown xfer->len); 130538ec10f6SMark Brown } 1306b158935fSMark Brown 1307b42faeeeSVladimir Oltean if (!ctlr->ptp_sts_supported) { 1308b42faeeeSVladimir Oltean ptp_read_system_postts(xfer->ptp_sts); 1309b42faeeeSVladimir Oltean xfer->ptp_sts_word_post = xfer->len; 1310b42faeeeSVladimir Oltean } 1311b42faeeeSVladimir Oltean 1312b158935fSMark Brown trace_spi_transfer_stop(msg, xfer); 1313b158935fSMark Brown 1314b158935fSMark Brown if (msg->status != -EINPROGRESS) 1315b158935fSMark Brown goto out; 1316b158935fSMark Brown 1317bebcfd27SAlexandru 
Ardelean spi_transfer_delay_exec(xfer); 1318b158935fSMark Brown 1319b158935fSMark Brown if (xfer->cs_change) { 1320b158935fSMark Brown if (list_is_last(&xfer->transfer_list, 1321b158935fSMark Brown &msg->transfers)) { 1322b158935fSMark Brown keep_cs = true; 1323b158935fSMark Brown } else { 13240b73aa63SMark Brown spi_set_cs(msg->spi, false); 13250ff2de8bSMartin Sperl _spi_transfer_cs_change_delay(msg, xfer); 13260b73aa63SMark Brown spi_set_cs(msg->spi, true); 1327b158935fSMark Brown } 1328b158935fSMark Brown } 1329b158935fSMark Brown 1330b158935fSMark Brown msg->actual_length += xfer->len; 1331b158935fSMark Brown } 1332b158935fSMark Brown 1333b158935fSMark Brown out: 1334b158935fSMark Brown if (ret != 0 || !keep_cs) 1335b158935fSMark Brown spi_set_cs(msg->spi, false); 1336b158935fSMark Brown 1337b158935fSMark Brown if (msg->status == -EINPROGRESS) 1338b158935fSMark Brown msg->status = ret; 1339b158935fSMark Brown 13408caab75fSGeert Uytterhoeven if (msg->status && ctlr->handle_err) 13418caab75fSGeert Uytterhoeven ctlr->handle_err(ctlr, msg); 1342b716c4ffSAndy Shevchenko 13430ed56252SMark Brown spi_finalize_current_message(ctlr); 13440ed56252SMark Brown 1345b158935fSMark Brown return ret; 1346b158935fSMark Brown } 1347b158935fSMark Brown 1348b158935fSMark Brown /** 1349b158935fSMark Brown * spi_finalize_current_transfer - report completion of a transfer 13508caab75fSGeert Uytterhoeven * @ctlr: the controller reporting completion 1351b158935fSMark Brown * 1352b158935fSMark Brown * Called by SPI drivers using the core transfer_one_message() 1353b158935fSMark Brown * implementation to notify it that the current interrupt driven 13549e8f4882SGeert Uytterhoeven * transfer has finished and the next one may be scheduled. 1355b158935fSMark Brown */ 13568caab75fSGeert Uytterhoeven void spi_finalize_current_transfer(struct spi_controller *ctlr) 1357b158935fSMark Brown { 13588caab75fSGeert Uytterhoeven complete(&ctlr->xfer_completion); 1359b158935fSMark Brown } 1360b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1361b158935fSMark Brown 1362e1268597SMark Brown static void spi_idle_runtime_pm(struct spi_controller *ctlr) 1363e1268597SMark Brown { 1364e1268597SMark Brown if (ctlr->auto_runtime_pm) { 1365e1268597SMark Brown pm_runtime_mark_last_busy(ctlr->dev.parent); 1366e1268597SMark Brown pm_runtime_put_autosuspend(ctlr->dev.parent); 1367e1268597SMark Brown } 1368e1268597SMark Brown } 1369e1268597SMark Brown 1370ffbbdd21SLinus Walleij /** 1371fc9e0f71SMark Brown * __spi_pump_messages - function which processes spi message queue 13728caab75fSGeert Uytterhoeven * @ctlr: controller to process queue for 1373fc9e0f71SMark Brown * @in_kthread: true if we are in the context of the message pump thread 1374ffbbdd21SLinus Walleij * 1375ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that 1376ffbbdd21SLinus Walleij * needs processing and if so call out to the driver to initialize hardware 1377ffbbdd21SLinus Walleij * and transfer each message. 1378ffbbdd21SLinus Walleij * 13790461a414SMark Brown * Note that it is called both from the kthread itself and also from 13800461a414SMark Brown * inside spi_sync(); the queue extraction handling at the top of the 13810461a414SMark Brown * function should deal with this safely. 
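 *
 * As a rough sketch of the call paths into this helper (names as used in
 * this file): queued transfers reach it through the worker thread,
 *
 *	__spi_queued_transfer() -> kthread_queue_work(ctlr->kworker, ...)
 *		-> spi_pump_messages() -> __spi_pump_messages(ctlr, true)
 *
 * while spi_sync() (as noted above) and spi_flush_queue() invoke
 * __spi_pump_messages(ctlr, false) from the caller's own context.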
1382ffbbdd21SLinus Walleij */ 13838caab75fSGeert Uytterhoeven static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1384ffbbdd21SLinus Walleij { 1385b42faeeeSVladimir Oltean struct spi_transfer *xfer; 1386d1c44c93SVladimir Oltean struct spi_message *msg; 1387ffbbdd21SLinus Walleij bool was_busy = false; 1388d1c44c93SVladimir Oltean unsigned long flags; 1389ffbbdd21SLinus Walleij int ret; 1390ffbbdd21SLinus Walleij 1391983aee5dSMark Brown /* Lock queue */ 13928caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1393983aee5dSMark Brown 1394983aee5dSMark Brown /* Make sure we are not already running a message */ 13958caab75fSGeert Uytterhoeven if (ctlr->cur_msg) { 13968caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1397983aee5dSMark Brown return; 1398983aee5dSMark Brown } 1399983aee5dSMark Brown 1400f0125f1aSMark Brown /* If another context is idling the device then defer */ 14018caab75fSGeert Uytterhoeven if (ctlr->idling) { 140260a883d1SMarek Szyprowski kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 14038caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 14040461a414SMark Brown return; 14050461a414SMark Brown } 14060461a414SMark Brown 1407983aee5dSMark Brown /* Check if the queue is idle */ 14088caab75fSGeert Uytterhoeven if (list_empty(&ctlr->queue) || !ctlr->running) { 14098caab75fSGeert Uytterhoeven if (!ctlr->busy) { 14108caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1411ffbbdd21SLinus Walleij return; 1412ffbbdd21SLinus Walleij } 1413fc9e0f71SMark Brown 1414e1268597SMark Brown /* Defer any non-atomic teardown to the thread */ 1415f0125f1aSMark Brown if (!in_kthread) { 1416e1268597SMark Brown if (!ctlr->dummy_rx && !ctlr->dummy_tx && 1417e1268597SMark Brown !ctlr->unprepare_transfer_hardware) { 1418e1268597SMark Brown spi_idle_runtime_pm(ctlr); 1419e1268597SMark Brown ctlr->busy = false; 1420e1268597SMark Brown trace_spi_controller_idle(ctlr); 1421e1268597SMark Brown } else { 142260a883d1SMarek Szyprowski kthread_queue_work(ctlr->kworker, 1423f0125f1aSMark Brown &ctlr->pump_messages); 1424e1268597SMark Brown } 1425f0125f1aSMark Brown spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1426f0125f1aSMark Brown return; 1427f0125f1aSMark Brown } 1428f0125f1aSMark Brown 1429f0125f1aSMark Brown ctlr->busy = false; 1430f0125f1aSMark Brown ctlr->idling = true; 1431f0125f1aSMark Brown spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1432f0125f1aSMark Brown 1433f0125f1aSMark Brown kfree(ctlr->dummy_rx); 1434f0125f1aSMark Brown ctlr->dummy_rx = NULL; 1435f0125f1aSMark Brown kfree(ctlr->dummy_tx); 1436f0125f1aSMark Brown ctlr->dummy_tx = NULL; 1437f0125f1aSMark Brown if (ctlr->unprepare_transfer_hardware && 1438f0125f1aSMark Brown ctlr->unprepare_transfer_hardware(ctlr)) 1439f0125f1aSMark Brown dev_err(&ctlr->dev, 1440f0125f1aSMark Brown "failed to unprepare transfer hardware\n"); 1441e1268597SMark Brown spi_idle_runtime_pm(ctlr); 1442f0125f1aSMark Brown trace_spi_controller_idle(ctlr); 1443f0125f1aSMark Brown 1444f0125f1aSMark Brown spin_lock_irqsave(&ctlr->queue_lock, flags); 1445f0125f1aSMark Brown ctlr->idling = false; 14468caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1447ffbbdd21SLinus Walleij return; 1448ffbbdd21SLinus Walleij } 1449ffbbdd21SLinus Walleij 1450ffbbdd21SLinus Walleij /* Extract head of queue */ 1451d1c44c93SVladimir Oltean msg = list_first_entry(&ctlr->queue, struct spi_message, queue); 1452d1c44c93SVladimir 
Oltean ctlr->cur_msg = msg; 1453ffbbdd21SLinus Walleij 1454d1c44c93SVladimir Oltean list_del_init(&msg->queue); 14558caab75fSGeert Uytterhoeven if (ctlr->busy) 1456ffbbdd21SLinus Walleij was_busy = true; 1457ffbbdd21SLinus Walleij else 14588caab75fSGeert Uytterhoeven ctlr->busy = true; 14598caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1460ffbbdd21SLinus Walleij 14618caab75fSGeert Uytterhoeven mutex_lock(&ctlr->io_mutex); 1462ef4d96ecSMark Brown 14638caab75fSGeert Uytterhoeven if (!was_busy && ctlr->auto_runtime_pm) { 14648caab75fSGeert Uytterhoeven ret = pm_runtime_get_sync(ctlr->dev.parent); 146549834de2SMark Brown if (ret < 0) { 14667e48e23aSTony Lindgren pm_runtime_put_noidle(ctlr->dev.parent); 14678caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "Failed to power device: %d\n", 146849834de2SMark Brown ret); 14698caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 147049834de2SMark Brown return; 147149834de2SMark Brown } 147249834de2SMark Brown } 147349834de2SMark Brown 147456ec1978SMark Brown if (!was_busy) 14758caab75fSGeert Uytterhoeven trace_spi_controller_busy(ctlr); 147656ec1978SMark Brown 14778caab75fSGeert Uytterhoeven if (!was_busy && ctlr->prepare_transfer_hardware) { 14788caab75fSGeert Uytterhoeven ret = ctlr->prepare_transfer_hardware(ctlr); 1479ffbbdd21SLinus Walleij if (ret) { 14808caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 1481f3440d9aSSuper Liu "failed to prepare transfer hardware: %d\n", 1482f3440d9aSSuper Liu ret); 148349834de2SMark Brown 14848caab75fSGeert Uytterhoeven if (ctlr->auto_runtime_pm) 14858caab75fSGeert Uytterhoeven pm_runtime_put(ctlr->dev.parent); 1486f3440d9aSSuper Liu 1487d1c44c93SVladimir Oltean msg->status = ret; 1488f3440d9aSSuper Liu spi_finalize_current_message(ctlr); 1489f3440d9aSSuper Liu 14908caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 1491ffbbdd21SLinus Walleij return; 1492ffbbdd21SLinus Walleij } 1493ffbbdd21SLinus Walleij } 1494ffbbdd21SLinus Walleij 1495d1c44c93SVladimir Oltean trace_spi_message_start(msg); 149656ec1978SMark Brown 14978caab75fSGeert Uytterhoeven if (ctlr->prepare_message) { 1498d1c44c93SVladimir Oltean ret = ctlr->prepare_message(ctlr, msg); 14992841a5fcSMark Brown if (ret) { 15008caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to prepare message: %d\n", 15018caab75fSGeert Uytterhoeven ret); 1502d1c44c93SVladimir Oltean msg->status = ret; 15038caab75fSGeert Uytterhoeven spi_finalize_current_message(ctlr); 150449023d2eSJon Hunter goto out; 15052841a5fcSMark Brown } 15068caab75fSGeert Uytterhoeven ctlr->cur_msg_prepared = true; 15072841a5fcSMark Brown } 15082841a5fcSMark Brown 1509d1c44c93SVladimir Oltean ret = spi_map_msg(ctlr, msg); 151099adef31SMark Brown if (ret) { 1511d1c44c93SVladimir Oltean msg->status = ret; 15128caab75fSGeert Uytterhoeven spi_finalize_current_message(ctlr); 151349023d2eSJon Hunter goto out; 151499adef31SMark Brown } 151599adef31SMark Brown 1516b42faeeeSVladimir Oltean if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 1517b42faeeeSVladimir Oltean list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1518b42faeeeSVladimir Oltean xfer->ptp_sts_word_pre = 0; 1519b42faeeeSVladimir Oltean ptp_read_system_prets(xfer->ptp_sts); 1520b42faeeeSVladimir Oltean } 1521b42faeeeSVladimir Oltean } 1522b42faeeeSVladimir Oltean 1523d1c44c93SVladimir Oltean ret = ctlr->transfer_one_message(ctlr, msg); 1524ffbbdd21SLinus Walleij if (ret) { 15258caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 15261f802f82SGeert Uytterhoeven "failed to transfer one 
message from queue\n"); 152749023d2eSJon Hunter goto out; 1528ffbbdd21SLinus Walleij } 152949023d2eSJon Hunter 153049023d2eSJon Hunter out: 15318caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 153262826970SMark Brown 153362826970SMark Brown /* Prod the scheduler in case transfer_one() was busy waiting */ 153449023d2eSJon Hunter if (!ret) 153562826970SMark Brown cond_resched(); 1536ffbbdd21SLinus Walleij } 1537ffbbdd21SLinus Walleij 1538fc9e0f71SMark Brown /** 1539fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 15408caab75fSGeert Uytterhoeven * @work: pointer to kthread work struct contained in the controller struct 1541fc9e0f71SMark Brown */ 1542fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1543fc9e0f71SMark Brown { 15448caab75fSGeert Uytterhoeven struct spi_controller *ctlr = 15458caab75fSGeert Uytterhoeven container_of(work, struct spi_controller, pump_messages); 1546fc9e0f71SMark Brown 15478caab75fSGeert Uytterhoeven __spi_pump_messages(ctlr, true); 1548fc9e0f71SMark Brown } 1549fc9e0f71SMark Brown 1550924b5867SDouglas Anderson /** 1551b42faeeeSVladimir Oltean * spi_take_timestamp_pre - helper for drivers to collect the beginning of the 1552b42faeeeSVladimir Oltean * TX timestamp for the requested byte from the SPI 1553b42faeeeSVladimir Oltean * transfer. The frequency with which this function 1554b42faeeeSVladimir Oltean * must be called (once per word, once for the whole 1555b42faeeeSVladimir Oltean * transfer, once per batch of words etc) is arbitrary 1556b42faeeeSVladimir Oltean * as long as the @tx buffer offset is greater than or 1557b42faeeeSVladimir Oltean * equal to the requested byte at the time of the 1558b42faeeeSVladimir Oltean * call. The timestamp is only taken once, at the 1559b42faeeeSVladimir Oltean * first such call. It is assumed that the driver 1560b42faeeeSVladimir Oltean * advances its @tx buffer pointer monotonically. 1561b42faeeeSVladimir Oltean * @ctlr: Pointer to the spi_controller structure of the driver 1562b42faeeeSVladimir Oltean * @xfer: Pointer to the transfer being timestamped 1563862dd2a9SVladimir Oltean * @progress: How many words (not bytes) have been transferred so far 1564b42faeeeSVladimir Oltean * @irqs_off: If true, will disable IRQs and preemption for the duration of the 1565b42faeeeSVladimir Oltean * transfer, for less jitter in time measurement. Only compatible 1566b42faeeeSVladimir Oltean * with PIO drivers. If true, must follow up with 1567b42faeeeSVladimir Oltean * spi_take_timestamp_post or otherwise system will crash. 1568b42faeeeSVladimir Oltean * WARNING: for fully predictable results, the CPU frequency must 1569b42faeeeSVladimir Oltean * also be under control (governor). 
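 *
 * As a minimal usage sketch only (byte-wide words assumed, so the word
 * count equals the byte index; write_fifo() is a made-up driver detail):
 *
 *	for (i = 0; i < xfer->len; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		write_fifo(((const u8 *)xfer->tx_buf)[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}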
1570b42faeeeSVladimir Oltean */ 1571b42faeeeSVladimir Oltean void spi_take_timestamp_pre(struct spi_controller *ctlr, 1572b42faeeeSVladimir Oltean struct spi_transfer *xfer, 1573862dd2a9SVladimir Oltean size_t progress, bool irqs_off) 1574b42faeeeSVladimir Oltean { 1575b42faeeeSVladimir Oltean if (!xfer->ptp_sts) 1576b42faeeeSVladimir Oltean return; 1577b42faeeeSVladimir Oltean 15786a726824SVladimir Oltean if (xfer->timestamped) 1579b42faeeeSVladimir Oltean return; 1580b42faeeeSVladimir Oltean 15816a726824SVladimir Oltean if (progress > xfer->ptp_sts_word_pre) 1582b42faeeeSVladimir Oltean return; 1583b42faeeeSVladimir Oltean 1584b42faeeeSVladimir Oltean /* Capture the resolution of the timestamp */ 1585862dd2a9SVladimir Oltean xfer->ptp_sts_word_pre = progress; 1586b42faeeeSVladimir Oltean 1587b42faeeeSVladimir Oltean if (irqs_off) { 1588b42faeeeSVladimir Oltean local_irq_save(ctlr->irq_flags); 1589b42faeeeSVladimir Oltean preempt_disable(); 1590b42faeeeSVladimir Oltean } 1591b42faeeeSVladimir Oltean 1592b42faeeeSVladimir Oltean ptp_read_system_prets(xfer->ptp_sts); 1593b42faeeeSVladimir Oltean } 1594b42faeeeSVladimir Oltean EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); 1595b42faeeeSVladimir Oltean 1596b42faeeeSVladimir Oltean /** 1597b42faeeeSVladimir Oltean * spi_take_timestamp_post - helper for drivers to collect the end of the 1598b42faeeeSVladimir Oltean * TX timestamp for the requested byte from the SPI 1599b42faeeeSVladimir Oltean * transfer. Can be called with an arbitrary 1600b42faeeeSVladimir Oltean * frequency: only the first call where @tx exceeds 1601b42faeeeSVladimir Oltean * or is equal to the requested word will be 1602b42faeeeSVladimir Oltean * timestamped. 1603b42faeeeSVladimir Oltean * @ctlr: Pointer to the spi_controller structure of the driver 1604b42faeeeSVladimir Oltean * @xfer: Pointer to the transfer being timestamped 1605862dd2a9SVladimir Oltean * @progress: How many words (not bytes) have been transferred so far 1606b42faeeeSVladimir Oltean * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. 
1607b42faeeeSVladimir Oltean */ 1608b42faeeeSVladimir Oltean void spi_take_timestamp_post(struct spi_controller *ctlr, 1609b42faeeeSVladimir Oltean struct spi_transfer *xfer, 1610862dd2a9SVladimir Oltean size_t progress, bool irqs_off) 1611b42faeeeSVladimir Oltean { 1612b42faeeeSVladimir Oltean if (!xfer->ptp_sts) 1613b42faeeeSVladimir Oltean return; 1614b42faeeeSVladimir Oltean 16156a726824SVladimir Oltean if (xfer->timestamped) 1616b42faeeeSVladimir Oltean return; 1617b42faeeeSVladimir Oltean 1618862dd2a9SVladimir Oltean if (progress < xfer->ptp_sts_word_post) 1619b42faeeeSVladimir Oltean return; 1620b42faeeeSVladimir Oltean 1621b42faeeeSVladimir Oltean ptp_read_system_postts(xfer->ptp_sts); 1622b42faeeeSVladimir Oltean 1623b42faeeeSVladimir Oltean if (irqs_off) { 1624b42faeeeSVladimir Oltean local_irq_restore(ctlr->irq_flags); 1625b42faeeeSVladimir Oltean preempt_enable(); 1626b42faeeeSVladimir Oltean } 1627b42faeeeSVladimir Oltean 1628b42faeeeSVladimir Oltean /* Capture the resolution of the timestamp */ 1629862dd2a9SVladimir Oltean xfer->ptp_sts_word_post = progress; 1630b42faeeeSVladimir Oltean 16316a726824SVladimir Oltean xfer->timestamped = true; 1632b42faeeeSVladimir Oltean } 1633b42faeeeSVladimir Oltean EXPORT_SYMBOL_GPL(spi_take_timestamp_post); 1634b42faeeeSVladimir Oltean 1635b42faeeeSVladimir Oltean /** 1636924b5867SDouglas Anderson * spi_set_thread_rt - set the controller to pump at realtime priority 1637924b5867SDouglas Anderson * @ctlr: controller to boost priority of 1638924b5867SDouglas Anderson * 1639924b5867SDouglas Anderson * This can be called because the controller requested realtime priority 1640924b5867SDouglas Anderson * (by setting the ->rt value before calling spi_register_controller()) or 1641924b5867SDouglas Anderson * because a device on the bus said that its transfers needed realtime 1642924b5867SDouglas Anderson * priority. 1643924b5867SDouglas Anderson * 1644924b5867SDouglas Anderson * NOTE: at the moment if any device on a bus says it needs realtime then 1645924b5867SDouglas Anderson * the thread will be at realtime priority for all transfers on that 1646924b5867SDouglas Anderson * controller. If this eventually becomes a problem we may see if we can 1647924b5867SDouglas Anderson * find a way to boost the priority only temporarily during relevant 1648924b5867SDouglas Anderson * transfers. 
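 *
 * For illustration only, the controller-driver side mentioned above might
 * look like this in probe ("pdev" and "priv" are hypothetical, error
 * handling omitted):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	ctlr->rt = true;
 *	ret = devm_spi_register_controller(&pdev->dev, ctlr);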
1649924b5867SDouglas Anderson */ 1650924b5867SDouglas Anderson static void spi_set_thread_rt(struct spi_controller *ctlr) 1651ffbbdd21SLinus Walleij { 1652924b5867SDouglas Anderson dev_info(&ctlr->dev, 1653924b5867SDouglas Anderson "will run message pump with realtime priority\n"); 16546d2b84a4SLinus Torvalds sched_set_fifo(ctlr->kworker->task); 1655924b5867SDouglas Anderson } 1656924b5867SDouglas Anderson 1657924b5867SDouglas Anderson static int spi_init_queue(struct spi_controller *ctlr) 1658924b5867SDouglas Anderson { 16598caab75fSGeert Uytterhoeven ctlr->running = false; 16608caab75fSGeert Uytterhoeven ctlr->busy = false; 1661ffbbdd21SLinus Walleij 166260a883d1SMarek Szyprowski ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); 166360a883d1SMarek Szyprowski if (IS_ERR(ctlr->kworker)) { 166460a883d1SMarek Szyprowski dev_err(&ctlr->dev, "failed to create message pump kworker\n"); 166560a883d1SMarek Szyprowski return PTR_ERR(ctlr->kworker); 1666ffbbdd21SLinus Walleij } 166760a883d1SMarek Szyprowski 16688caab75fSGeert Uytterhoeven kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 1669f0125f1aSMark Brown 1670ffbbdd21SLinus Walleij /* 16718caab75fSGeert Uytterhoeven * Controller config will indicate if this controller should run the 1672ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1673ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1674ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 1675ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 1676ffbbdd21SLinus Walleij */ 1677924b5867SDouglas Anderson if (ctlr->rt) 1678924b5867SDouglas Anderson spi_set_thread_rt(ctlr); 1679ffbbdd21SLinus Walleij 1680ffbbdd21SLinus Walleij return 0; 1681ffbbdd21SLinus Walleij } 1682ffbbdd21SLinus Walleij 1683ffbbdd21SLinus Walleij /** 1684ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1685ffbbdd21SLinus Walleij * messages 16868caab75fSGeert Uytterhoeven * @ctlr: the controller to check for queued messages 1687ffbbdd21SLinus Walleij * 1688ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1689ffbbdd21SLinus Walleij * this call. 169097d56dc6SJavier Martinez Canillas * 169197d56dc6SJavier Martinez Canillas * Return: the next message in the queue, else NULL if the queue is empty. 
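 *
 * For illustration only, a driver's transfer_one_message() implementation
 * might peek at the queue to decide whether to keep its hardware prepared
 * (the power-down helper is hypothetical):
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		my_hw_power_down(ctlr);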
1692ffbbdd21SLinus Walleij */
16938caab75fSGeert Uytterhoeven struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1694ffbbdd21SLinus Walleij {
1695ffbbdd21SLinus Walleij struct spi_message *next;
1696ffbbdd21SLinus Walleij unsigned long flags;
1697ffbbdd21SLinus Walleij 
1698ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */
16998caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags);
17008caab75fSGeert Uytterhoeven next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
17011cfd97f9SAxel Lin queue);
17028caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1703ffbbdd21SLinus Walleij 
1704ffbbdd21SLinus Walleij return next;
1705ffbbdd21SLinus Walleij }
1706ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1707ffbbdd21SLinus Walleij 
1708ffbbdd21SLinus Walleij /**
1709ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete
17108caab75fSGeert Uytterhoeven * @ctlr: the controller to return the message to
1711ffbbdd21SLinus Walleij *
1712ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the
1713ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue.
1714ffbbdd21SLinus Walleij */
17158caab75fSGeert Uytterhoeven void spi_finalize_current_message(struct spi_controller *ctlr)
1716ffbbdd21SLinus Walleij {
1717b42faeeeSVladimir Oltean struct spi_transfer *xfer;
1718ffbbdd21SLinus Walleij struct spi_message *mesg;
1719ffbbdd21SLinus Walleij unsigned long flags;
17202841a5fcSMark Brown int ret;
1721ffbbdd21SLinus Walleij 
17228caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags);
17238caab75fSGeert Uytterhoeven mesg = ctlr->cur_msg;
17248caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1725ffbbdd21SLinus Walleij 
1726b42faeeeSVladimir Oltean if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1727b42faeeeSVladimir Oltean list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1728b42faeeeSVladimir Oltean ptp_read_system_postts(xfer->ptp_sts);
1729b42faeeeSVladimir Oltean xfer->ptp_sts_word_post = xfer->len;
1730b42faeeeSVladimir Oltean }
1731b42faeeeSVladimir Oltean }
1732b42faeeeSVladimir Oltean 
17336a726824SVladimir Oltean if (unlikely(ctlr->ptp_sts_supported))
17346a726824SVladimir Oltean list_for_each_entry(xfer, &mesg->transfers, transfer_list)
17356a726824SVladimir Oltean WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1736f971a207SVladimir Oltean 
17378caab75fSGeert Uytterhoeven spi_unmap_msg(ctlr, mesg);
173899adef31SMark Brown 
1739b59a7ca1SGustav Wiklander /* In the prepare_message callback the spi bus has the opportunity to
1740b59a7ca1SGustav Wiklander * split a transfer into smaller chunks.
1741b59a7ca1SGustav Wiklander * Release split transfers here since spi_map_msg is done on the
1742b59a7ca1SGustav Wiklander * split transfers.
1743b59a7ca1SGustav Wiklander */ 1744b59a7ca1SGustav Wiklander spi_res_release(ctlr, mesg); 1745b59a7ca1SGustav Wiklander 17468caab75fSGeert Uytterhoeven if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 17478caab75fSGeert Uytterhoeven ret = ctlr->unprepare_message(ctlr, mesg); 17482841a5fcSMark Brown if (ret) { 17498caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 17508caab75fSGeert Uytterhoeven ret); 17512841a5fcSMark Brown } 17522841a5fcSMark Brown } 1753391949b6SUwe Kleine-König 17548caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 17558caab75fSGeert Uytterhoeven ctlr->cur_msg = NULL; 17568caab75fSGeert Uytterhoeven ctlr->cur_msg_prepared = false; 1757809b1b04SRobin Gong ctlr->fallback = false; 175860a883d1SMarek Szyprowski kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 17598caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 17608e76ef88SMartin Sperl 17618e76ef88SMartin Sperl trace_spi_message_done(mesg); 17622841a5fcSMark Brown 1763ffbbdd21SLinus Walleij mesg->state = NULL; 1764ffbbdd21SLinus Walleij if (mesg->complete) 1765ffbbdd21SLinus Walleij mesg->complete(mesg->context); 1766ffbbdd21SLinus Walleij } 1767ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1768ffbbdd21SLinus Walleij 17698caab75fSGeert Uytterhoeven static int spi_start_queue(struct spi_controller *ctlr) 1770ffbbdd21SLinus Walleij { 1771ffbbdd21SLinus Walleij unsigned long flags; 1772ffbbdd21SLinus Walleij 17738caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1774ffbbdd21SLinus Walleij 17758caab75fSGeert Uytterhoeven if (ctlr->running || ctlr->busy) { 17768caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1777ffbbdd21SLinus Walleij return -EBUSY; 1778ffbbdd21SLinus Walleij } 1779ffbbdd21SLinus Walleij 17808caab75fSGeert Uytterhoeven ctlr->running = true; 17818caab75fSGeert Uytterhoeven ctlr->cur_msg = NULL; 17828caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1783ffbbdd21SLinus Walleij 178460a883d1SMarek Szyprowski kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1785ffbbdd21SLinus Walleij 1786ffbbdd21SLinus Walleij return 0; 1787ffbbdd21SLinus Walleij } 1788ffbbdd21SLinus Walleij 17898caab75fSGeert Uytterhoeven static int spi_stop_queue(struct spi_controller *ctlr) 1790ffbbdd21SLinus Walleij { 1791ffbbdd21SLinus Walleij unsigned long flags; 1792ffbbdd21SLinus Walleij unsigned limit = 500; 1793ffbbdd21SLinus Walleij int ret = 0; 1794ffbbdd21SLinus Walleij 17958caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1796ffbbdd21SLinus Walleij 1797ffbbdd21SLinus Walleij /* 1798ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 17998caab75fSGeert Uytterhoeven * A wait_queue on the ctlr->busy could be used, but then the common 1800ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1801ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 
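 * With limit initialised to 500 above and usleep_range(10000, 11000) per
 * iteration below, this polls for roughly five seconds before giving up
 * with -EBUSY.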
1802ffbbdd21SLinus Walleij */
18038caab75fSGeert Uytterhoeven while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
18048caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1805f97b26b0SAxel Lin usleep_range(10000, 11000);
18068caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags);
1807ffbbdd21SLinus Walleij }
1808ffbbdd21SLinus Walleij 
18098caab75fSGeert Uytterhoeven if (!list_empty(&ctlr->queue) || ctlr->busy)
1810ffbbdd21SLinus Walleij ret = -EBUSY;
1811ffbbdd21SLinus Walleij else
18128caab75fSGeert Uytterhoeven ctlr->running = false;
1813ffbbdd21SLinus Walleij 
18148caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1815ffbbdd21SLinus Walleij 
1816ffbbdd21SLinus Walleij if (ret) {
18178caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, "could not stop message queue\n");
1818ffbbdd21SLinus Walleij return ret;
1819ffbbdd21SLinus Walleij }
1820ffbbdd21SLinus Walleij return ret;
1821ffbbdd21SLinus Walleij }
1822ffbbdd21SLinus Walleij 
18238caab75fSGeert Uytterhoeven static int spi_destroy_queue(struct spi_controller *ctlr)
1824ffbbdd21SLinus Walleij {
1825ffbbdd21SLinus Walleij int ret;
1826ffbbdd21SLinus Walleij 
18278caab75fSGeert Uytterhoeven ret = spi_stop_queue(ctlr);
1828ffbbdd21SLinus Walleij 
1829ffbbdd21SLinus Walleij /*
18303989144fSPetr Mladek * kthread_flush_worker will block until all work is done.
1831ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never
1832ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so
1833ffbbdd21SLinus Walleij * return anyway.
1834ffbbdd21SLinus Walleij */
1835ffbbdd21SLinus Walleij if (ret) {
18368caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem destroying queue\n");
1837ffbbdd21SLinus Walleij return ret;
1838ffbbdd21SLinus Walleij }
1839ffbbdd21SLinus Walleij 
184060a883d1SMarek Szyprowski kthread_destroy_worker(ctlr->kworker);
1841ffbbdd21SLinus Walleij 
1842ffbbdd21SLinus Walleij return 0;
1843ffbbdd21SLinus Walleij }
1844ffbbdd21SLinus Walleij 
18450461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi,
18460461a414SMark Brown struct spi_message *msg,
18470461a414SMark Brown bool need_pump)
1848ffbbdd21SLinus Walleij {
18498caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller;
1850ffbbdd21SLinus Walleij unsigned long flags;
1851ffbbdd21SLinus Walleij 
18528caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags);
1853ffbbdd21SLinus Walleij 
18548caab75fSGeert Uytterhoeven if (!ctlr->running) {
18558caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1856ffbbdd21SLinus Walleij return -ESHUTDOWN;
1857ffbbdd21SLinus Walleij }
1858ffbbdd21SLinus Walleij msg->actual_length = 0;
1859ffbbdd21SLinus Walleij msg->status = -EINPROGRESS;
1860ffbbdd21SLinus Walleij 
18618caab75fSGeert Uytterhoeven list_add_tail(&msg->queue, &ctlr->queue);
1862f0125f1aSMark Brown if (!ctlr->busy && need_pump)
186360a883d1SMarek Szyprowski kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1864ffbbdd21SLinus Walleij 
18658caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1866ffbbdd21SLinus Walleij return 0;
1867ffbbdd21SLinus Walleij }
1868ffbbdd21SLinus Walleij 
18690461a414SMark Brown /**
18700461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers
18710461a414SMark Brown * @spi: spi device which is requesting transfer
18720461a414SMark Brown * @msg: spi message which is to be handled and queued to the driver
queue 187397d56dc6SJavier Martinez Canillas * 187497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 18750461a414SMark Brown */ 18760461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 18770461a414SMark Brown { 18780461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 18790461a414SMark Brown } 18800461a414SMark Brown 18818caab75fSGeert Uytterhoeven static int spi_controller_initialize_queue(struct spi_controller *ctlr) 1882ffbbdd21SLinus Walleij { 1883ffbbdd21SLinus Walleij int ret; 1884ffbbdd21SLinus Walleij 18858caab75fSGeert Uytterhoeven ctlr->transfer = spi_queued_transfer; 18868caab75fSGeert Uytterhoeven if (!ctlr->transfer_one_message) 18878caab75fSGeert Uytterhoeven ctlr->transfer_one_message = spi_transfer_one_message; 1888ffbbdd21SLinus Walleij 1889ffbbdd21SLinus Walleij /* Initialize and start queue */ 18908caab75fSGeert Uytterhoeven ret = spi_init_queue(ctlr); 1891ffbbdd21SLinus Walleij if (ret) { 18928caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem initializing queue\n"); 1893ffbbdd21SLinus Walleij goto err_init_queue; 1894ffbbdd21SLinus Walleij } 18958caab75fSGeert Uytterhoeven ctlr->queued = true; 18968caab75fSGeert Uytterhoeven ret = spi_start_queue(ctlr); 1897ffbbdd21SLinus Walleij if (ret) { 18988caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem starting queue\n"); 1899ffbbdd21SLinus Walleij goto err_start_queue; 1900ffbbdd21SLinus Walleij } 1901ffbbdd21SLinus Walleij 1902ffbbdd21SLinus Walleij return 0; 1903ffbbdd21SLinus Walleij 1904ffbbdd21SLinus Walleij err_start_queue: 19058caab75fSGeert Uytterhoeven spi_destroy_queue(ctlr); 1906c3676d5cSMark Brown err_init_queue: 1907ffbbdd21SLinus Walleij return ret; 1908ffbbdd21SLinus Walleij } 1909ffbbdd21SLinus Walleij 1910988f259bSBoris Brezillon /** 1911988f259bSBoris Brezillon * spi_flush_queue - Send all pending messages in the queue from the callers' 1912988f259bSBoris Brezillon * context 1913988f259bSBoris Brezillon * @ctlr: controller to process queue for 1914988f259bSBoris Brezillon * 1915988f259bSBoris Brezillon * This should be used when one wants to ensure all pending messages have been 1916988f259bSBoris Brezillon * sent before doing something. Is used by the spi-mem code to make sure SPI 1917988f259bSBoris Brezillon * memory operations do not preempt regular SPI transfers that have been queued 1918988f259bSBoris Brezillon * before the spi-mem operation. 1919988f259bSBoris Brezillon */ 1920988f259bSBoris Brezillon void spi_flush_queue(struct spi_controller *ctlr) 1921988f259bSBoris Brezillon { 1922988f259bSBoris Brezillon if (ctlr->transfer == spi_queued_transfer) 1923988f259bSBoris Brezillon __spi_pump_messages(ctlr, false); 1924988f259bSBoris Brezillon } 1925988f259bSBoris Brezillon 1926ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1927ffbbdd21SLinus Walleij 19287cb94361SAndreas Larsson #if defined(CONFIG_OF) 19298caab75fSGeert Uytterhoeven static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 1930c2e51ac3SGeert Uytterhoeven struct device_node *nc) 1931d57a4282SGrant Likely { 193289da4293STrent Piepho u32 value; 1933c2e51ac3SGeert Uytterhoeven int rc; 1934d57a4282SGrant Likely 1935d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) 
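 * For instance, a node that sets both "spi-cpol" and "spi-cpha" ends up
 * with SPI_CPOL | SPI_CPHA, i.e. SPI mode 3.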
*/ 1936e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-cpha")) 1937d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1938e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-cpol")) 1939d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1940e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-3wire")) 1941c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1942e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-lsb-first")) 1943cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 19443e5ec1dbSGregory CLEMENT if (of_property_read_bool(nc, "spi-cs-high")) 1945f3186dd8SLinus Walleij spi->mode |= SPI_CS_HIGH; 1946f3186dd8SLinus Walleij 1947f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 194889da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 194989da4293STrent Piepho switch (value) { 1950d962608cSDragos Bogdan case 0: 1951d962608cSDragos Bogdan spi->mode |= SPI_NO_TX; 1952d962608cSDragos Bogdan break; 195389da4293STrent Piepho case 1: 1954f477b7fbSwangyuhang break; 195589da4293STrent Piepho case 2: 1956f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1957f477b7fbSwangyuhang break; 195889da4293STrent Piepho case 4: 1959f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1960f477b7fbSwangyuhang break; 19616b03061fSYogesh Narayan Gaur case 8: 19626b03061fSYogesh Narayan Gaur spi->mode |= SPI_TX_OCTAL; 19636b03061fSYogesh Narayan Gaur break; 1964f477b7fbSwangyuhang default: 19658caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 1966a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 196789da4293STrent Piepho value); 196880874d8cSGeert Uytterhoeven break; 1969f477b7fbSwangyuhang } 1970a822e99cSMark Brown } 1971f477b7fbSwangyuhang 197289da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 197389da4293STrent Piepho switch (value) { 1974d962608cSDragos Bogdan case 0: 1975d962608cSDragos Bogdan spi->mode |= SPI_NO_RX; 1976d962608cSDragos Bogdan break; 197789da4293STrent Piepho case 1: 1978f477b7fbSwangyuhang break; 197989da4293STrent Piepho case 2: 1980f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1981f477b7fbSwangyuhang break; 198289da4293STrent Piepho case 4: 1983f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1984f477b7fbSwangyuhang break; 19856b03061fSYogesh Narayan Gaur case 8: 19866b03061fSYogesh Narayan Gaur spi->mode |= SPI_RX_OCTAL; 19876b03061fSYogesh Narayan Gaur break; 1988f477b7fbSwangyuhang default: 19898caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 1990a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 199189da4293STrent Piepho value); 199280874d8cSGeert Uytterhoeven break; 1993f477b7fbSwangyuhang } 1994a822e99cSMark Brown } 1995f477b7fbSwangyuhang 19968caab75fSGeert Uytterhoeven if (spi_controller_is_slave(ctlr)) { 1997194276b0SRob Herring if (!of_node_name_eq(nc, "slave")) { 199825c56c88SRob Herring dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 199925c56c88SRob Herring nc); 20006c364062SGeert Uytterhoeven return -EINVAL; 20016c364062SGeert Uytterhoeven } 20026c364062SGeert Uytterhoeven return 0; 20036c364062SGeert Uytterhoeven } 20046c364062SGeert Uytterhoeven 20056c364062SGeert Uytterhoeven /* Device address */ 20066c364062SGeert Uytterhoeven rc = of_property_read_u32(nc, "reg", &value); 20076c364062SGeert Uytterhoeven if (rc) { 200825c56c88SRob Herring dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 200925c56c88SRob Herring nc, rc); 20106c364062SGeert Uytterhoeven return rc; 20116c364062SGeert Uytterhoeven } 20126c364062SGeert Uytterhoeven spi->chip_select = value; 20136c364062SGeert 
Uytterhoeven 2014d57a4282SGrant Likely /* Device speed */ 2015671c3bf5SChuanhong Guo if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 201689da4293STrent Piepho spi->max_speed_hz = value; 2017d57a4282SGrant Likely 2018c2e51ac3SGeert Uytterhoeven return 0; 2019c2e51ac3SGeert Uytterhoeven } 2020c2e51ac3SGeert Uytterhoeven 2021c2e51ac3SGeert Uytterhoeven static struct spi_device * 20228caab75fSGeert Uytterhoeven of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2023c2e51ac3SGeert Uytterhoeven { 2024c2e51ac3SGeert Uytterhoeven struct spi_device *spi; 2025c2e51ac3SGeert Uytterhoeven int rc; 2026c2e51ac3SGeert Uytterhoeven 2027c2e51ac3SGeert Uytterhoeven /* Alloc an spi_device */ 20288caab75fSGeert Uytterhoeven spi = spi_alloc_device(ctlr); 2029c2e51ac3SGeert Uytterhoeven if (!spi) { 203025c56c88SRob Herring dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2031c2e51ac3SGeert Uytterhoeven rc = -ENOMEM; 2032c2e51ac3SGeert Uytterhoeven goto err_out; 2033c2e51ac3SGeert Uytterhoeven } 2034c2e51ac3SGeert Uytterhoeven 2035c2e51ac3SGeert Uytterhoeven /* Select device driver */ 2036c2e51ac3SGeert Uytterhoeven rc = of_modalias_node(nc, spi->modalias, 2037c2e51ac3SGeert Uytterhoeven sizeof(spi->modalias)); 2038c2e51ac3SGeert Uytterhoeven if (rc < 0) { 203925c56c88SRob Herring dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2040c2e51ac3SGeert Uytterhoeven goto err_out; 2041c2e51ac3SGeert Uytterhoeven } 2042c2e51ac3SGeert Uytterhoeven 20438caab75fSGeert Uytterhoeven rc = of_spi_parse_dt(ctlr, spi, nc); 2044c2e51ac3SGeert Uytterhoeven if (rc) 2045c2e51ac3SGeert Uytterhoeven goto err_out; 2046c2e51ac3SGeert Uytterhoeven 2047d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 2048d57a4282SGrant Likely of_node_get(nc); 2049d57a4282SGrant Likely spi->dev.of_node = nc; 2050*0e793ba7SCharles Keepax spi->dev.fwnode = of_fwnode_handle(nc); 2051d57a4282SGrant Likely 2052d57a4282SGrant Likely /* Register the new device */ 2053d57a4282SGrant Likely rc = spi_add_device(spi); 2054d57a4282SGrant Likely if (rc) { 205525c56c88SRob Herring dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 20568324147fSJohan Hovold goto err_of_node_put; 2057d57a4282SGrant Likely } 2058d57a4282SGrant Likely 2059aff5e3f8SPantelis Antoniou return spi; 2060aff5e3f8SPantelis Antoniou 20618324147fSJohan Hovold err_of_node_put: 20628324147fSJohan Hovold of_node_put(nc); 2063aff5e3f8SPantelis Antoniou err_out: 2064aff5e3f8SPantelis Antoniou spi_dev_put(spi); 2065aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 2066aff5e3f8SPantelis Antoniou } 2067aff5e3f8SPantelis Antoniou 2068aff5e3f8SPantelis Antoniou /** 2069aff5e3f8SPantelis Antoniou * of_register_spi_devices() - Register child devices onto the SPI bus 20708caab75fSGeert Uytterhoeven * @ctlr: Pointer to spi_controller device 2071aff5e3f8SPantelis Antoniou * 20726c364062SGeert Uytterhoeven * Registers an spi_device for each child node of controller node which 20736c364062SGeert Uytterhoeven * represents a valid SPI slave. 
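 *
 * For illustration only, a child node along these lines (the compatible
 * string is made up; the properties are the ones parsed by
 * of_spi_parse_dt() above) becomes an spi_device with chip_select 0,
 * a 10 MHz speed limit and SPI mode 0:
 *
 *	flash@0 {
 *		compatible = "vendor,example-flash";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *	};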
2074aff5e3f8SPantelis Antoniou */ 20758caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr) 2076aff5e3f8SPantelis Antoniou { 2077aff5e3f8SPantelis Antoniou struct spi_device *spi; 2078aff5e3f8SPantelis Antoniou struct device_node *nc; 2079aff5e3f8SPantelis Antoniou 20808caab75fSGeert Uytterhoeven if (!ctlr->dev.of_node) 2081aff5e3f8SPantelis Antoniou return; 2082aff5e3f8SPantelis Antoniou 20838caab75fSGeert Uytterhoeven for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2084bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2085bd6c1644SGeert Uytterhoeven continue; 20868caab75fSGeert Uytterhoeven spi = of_register_spi_device(ctlr, nc); 2087e0af98a7SRalf Ramsauer if (IS_ERR(spi)) { 20888caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 208925c56c88SRob Herring "Failed to create SPI device for %pOF\n", nc); 2090e0af98a7SRalf Ramsauer of_node_clear_flag(nc, OF_POPULATED); 2091e0af98a7SRalf Ramsauer } 2092d57a4282SGrant Likely } 2093d57a4282SGrant Likely } 2094d57a4282SGrant Likely #else 20958caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr) { } 2096d57a4282SGrant Likely #endif 2097d57a4282SGrant Likely 209864bee4d2SMika Westerberg #ifdef CONFIG_ACPI 20994c3c5954SArd Biesheuvel struct acpi_spi_lookup { 21004c3c5954SArd Biesheuvel struct spi_controller *ctlr; 21014c3c5954SArd Biesheuvel u32 max_speed_hz; 21024c3c5954SArd Biesheuvel u32 mode; 21034c3c5954SArd Biesheuvel int irq; 21044c3c5954SArd Biesheuvel u8 bits_per_word; 21054c3c5954SArd Biesheuvel u8 chip_select; 21064c3c5954SArd Biesheuvel }; 21074c3c5954SArd Biesheuvel 21084c3c5954SArd Biesheuvel static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 21094c3c5954SArd Biesheuvel struct acpi_spi_lookup *lookup) 21108a2e487eSLukas Wunner { 21118a2e487eSLukas Wunner const union acpi_object *obj; 21128a2e487eSLukas Wunner 21138a2e487eSLukas Wunner if (!x86_apple_machine) 21148a2e487eSLukas Wunner return; 21158a2e487eSLukas Wunner 21168a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 21178a2e487eSLukas Wunner && obj->buffer.length >= 4) 21184c3c5954SArd Biesheuvel lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 21198a2e487eSLukas Wunner 21208a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 21218a2e487eSLukas Wunner && obj->buffer.length == 8) 21224c3c5954SArd Biesheuvel lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 21238a2e487eSLukas Wunner 21248a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 21258a2e487eSLukas Wunner && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 21264c3c5954SArd Biesheuvel lookup->mode |= SPI_LSB_FIRST; 21278a2e487eSLukas Wunner 21288a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 21298a2e487eSLukas Wunner && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 21304c3c5954SArd Biesheuvel lookup->mode |= SPI_CPOL; 21318a2e487eSLukas Wunner 21328a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 21338a2e487eSLukas Wunner && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 21344c3c5954SArd Biesheuvel lookup->mode |= SPI_CPHA; 21358a2e487eSLukas Wunner } 21368a2e487eSLukas Wunner 213764bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 213864bee4d2SMika Westerberg { 21394c3c5954SArd Biesheuvel struct 
acpi_spi_lookup *lookup = data; 21404c3c5954SArd Biesheuvel struct spi_controller *ctlr = lookup->ctlr; 214164bee4d2SMika Westerberg 214264bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 214364bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 21444c3c5954SArd Biesheuvel acpi_handle parent_handle; 21454c3c5954SArd Biesheuvel acpi_status status; 214664bee4d2SMika Westerberg 214764bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 214864bee4d2SMika Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 21494c3c5954SArd Biesheuvel 21504c3c5954SArd Biesheuvel status = acpi_get_handle(NULL, 21514c3c5954SArd Biesheuvel sb->resource_source.string_ptr, 21524c3c5954SArd Biesheuvel &parent_handle); 21534c3c5954SArd Biesheuvel 2154b5e3cf41SArd Biesheuvel if (ACPI_FAILURE(status) || 21554c3c5954SArd Biesheuvel ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 21564c3c5954SArd Biesheuvel return -ENODEV; 21574c3c5954SArd Biesheuvel 2158a0a90718SMika Westerberg /* 2159a0a90718SMika Westerberg * ACPI DeviceSelection numbering is handled by the 2160a0a90718SMika Westerberg * host controller driver in Windows and can vary 2161a0a90718SMika Westerberg * from driver to driver. In Linux we always expect 2162a0a90718SMika Westerberg * 0 .. max - 1 so we need to ask the driver to 2163a0a90718SMika Westerberg * translate between the two schemes. 2164a0a90718SMika Westerberg */ 21658caab75fSGeert Uytterhoeven if (ctlr->fw_translate_cs) { 21668caab75fSGeert Uytterhoeven int cs = ctlr->fw_translate_cs(ctlr, 2167a0a90718SMika Westerberg sb->device_selection); 2168a0a90718SMika Westerberg if (cs < 0) 2169a0a90718SMika Westerberg return cs; 21704c3c5954SArd Biesheuvel lookup->chip_select = cs; 2171a0a90718SMika Westerberg } else { 21724c3c5954SArd Biesheuvel lookup->chip_select = sb->device_selection; 2173a0a90718SMika Westerberg } 2174a0a90718SMika Westerberg 21754c3c5954SArd Biesheuvel lookup->max_speed_hz = sb->connection_speed; 21760dadde34SAndy Shevchenko lookup->bits_per_word = sb->data_bit_length; 217764bee4d2SMika Westerberg 217864bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 21794c3c5954SArd Biesheuvel lookup->mode |= SPI_CPHA; 218064bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 21814c3c5954SArd Biesheuvel lookup->mode |= SPI_CPOL; 218264bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 21834c3c5954SArd Biesheuvel lookup->mode |= SPI_CS_HIGH; 218464bee4d2SMika Westerberg } 21854c3c5954SArd Biesheuvel } else if (lookup->irq < 0) { 218664bee4d2SMika Westerberg struct resource r; 218764bee4d2SMika Westerberg 218864bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 21894c3c5954SArd Biesheuvel lookup->irq = r.start; 219064bee4d2SMika Westerberg } 219164bee4d2SMika Westerberg 219264bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 219364bee4d2SMika Westerberg return 1; 219464bee4d2SMika Westerberg } 219564bee4d2SMika Westerberg 21968caab75fSGeert Uytterhoeven static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 21977f24467fSOctavian Purdila struct acpi_device *adev) 219864bee4d2SMika Westerberg { 21994c3c5954SArd Biesheuvel acpi_handle parent_handle = NULL; 220064bee4d2SMika Westerberg struct list_head resource_list; 2201b28944c6SArd Biesheuvel struct acpi_spi_lookup lookup = {}; 220264bee4d2SMika Westerberg struct spi_device *spi; 220364bee4d2SMika Westerberg int ret; 220464bee4d2SMika Westerberg 22057f24467fSOctavian Purdila if 
(acpi_bus_get_status(adev) || !adev->status.present || 22067f24467fSOctavian Purdila acpi_device_enumerated(adev)) 220764bee4d2SMika Westerberg return AE_OK; 220864bee4d2SMika Westerberg 22094c3c5954SArd Biesheuvel lookup.ctlr = ctlr; 22104c3c5954SArd Biesheuvel lookup.irq = -1; 22114c3c5954SArd Biesheuvel 22124c3c5954SArd Biesheuvel INIT_LIST_HEAD(&resource_list); 22134c3c5954SArd Biesheuvel ret = acpi_dev_get_resources(adev, &resource_list, 22144c3c5954SArd Biesheuvel acpi_spi_add_resource, &lookup); 22154c3c5954SArd Biesheuvel acpi_dev_free_resource_list(&resource_list); 22164c3c5954SArd Biesheuvel 22174c3c5954SArd Biesheuvel if (ret < 0) 22184c3c5954SArd Biesheuvel /* found SPI in _CRS but it points to another controller */ 22194c3c5954SArd Biesheuvel return AE_OK; 22204c3c5954SArd Biesheuvel 22214c3c5954SArd Biesheuvel if (!lookup.max_speed_hz && 222210e92724SBjorn Helgaas ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 22234c3c5954SArd Biesheuvel ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { 22244c3c5954SArd Biesheuvel /* Apple does not use _CRS but nested devices for SPI slaves */ 22254c3c5954SArd Biesheuvel acpi_spi_parse_apple_properties(adev, &lookup); 22264c3c5954SArd Biesheuvel } 22274c3c5954SArd Biesheuvel 22284c3c5954SArd Biesheuvel if (!lookup.max_speed_hz) 22294c3c5954SArd Biesheuvel return AE_OK; 22304c3c5954SArd Biesheuvel 22318caab75fSGeert Uytterhoeven spi = spi_alloc_device(ctlr); 223264bee4d2SMika Westerberg if (!spi) { 22338caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 223464bee4d2SMika Westerberg dev_name(&adev->dev)); 223564bee4d2SMika Westerberg return AE_NO_MEMORY; 223664bee4d2SMika Westerberg } 223764bee4d2SMika Westerberg 2238ea235786SJohn Garry 22397b199811SRafael J. 
Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 22404c3c5954SArd Biesheuvel spi->max_speed_hz = lookup.max_speed_hz; 2241ea235786SJohn Garry spi->mode |= lookup.mode; 22424c3c5954SArd Biesheuvel spi->irq = lookup.irq; 22434c3c5954SArd Biesheuvel spi->bits_per_word = lookup.bits_per_word; 22444c3c5954SArd Biesheuvel spi->chip_select = lookup.chip_select; 224564bee4d2SMika Westerberg 22460c6543f6SDan O'Donovan acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 22470c6543f6SDan O'Donovan sizeof(spi->modalias)); 22480c6543f6SDan O'Donovan 224933ada67dSChristophe RICARD if (spi->irq < 0) 225033ada67dSChristophe RICARD spi->irq = acpi_dev_gpio_irq_get(adev, 0); 225133ada67dSChristophe RICARD 22527f24467fSOctavian Purdila acpi_device_set_enumerated(adev); 22537f24467fSOctavian Purdila 225433cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 225564bee4d2SMika Westerberg if (spi_add_device(spi)) { 225633cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 22578caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 225864bee4d2SMika Westerberg dev_name(&adev->dev)); 225964bee4d2SMika Westerberg spi_dev_put(spi); 226064bee4d2SMika Westerberg } 226164bee4d2SMika Westerberg 226264bee4d2SMika Westerberg return AE_OK; 226364bee4d2SMika Westerberg } 226464bee4d2SMika Westerberg 22657f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 22667f24467fSOctavian Purdila void *data, void **return_value) 22677f24467fSOctavian Purdila { 22688caab75fSGeert Uytterhoeven struct spi_controller *ctlr = data; 22697f24467fSOctavian Purdila struct acpi_device *adev; 22707f24467fSOctavian Purdila 22717f24467fSOctavian Purdila if (acpi_bus_get_device(handle, &adev)) 22727f24467fSOctavian Purdila return AE_OK; 22737f24467fSOctavian Purdila 22748caab75fSGeert Uytterhoeven return acpi_register_spi_device(ctlr, adev); 22757f24467fSOctavian Purdila } 22767f24467fSOctavian Purdila 22774c3c5954SArd Biesheuvel #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 22784c3c5954SArd Biesheuvel 22798caab75fSGeert Uytterhoeven static void acpi_register_spi_devices(struct spi_controller *ctlr) 228064bee4d2SMika Westerberg { 228164bee4d2SMika Westerberg acpi_status status; 228264bee4d2SMika Westerberg acpi_handle handle; 228364bee4d2SMika Westerberg 22848caab75fSGeert Uytterhoeven handle = ACPI_HANDLE(ctlr->dev.parent); 228564bee4d2SMika Westerberg if (!handle) 228664bee4d2SMika Westerberg return; 228764bee4d2SMika Westerberg 22884c3c5954SArd Biesheuvel status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 22894c3c5954SArd Biesheuvel SPI_ACPI_ENUMERATE_MAX_DEPTH, 22908caab75fSGeert Uytterhoeven acpi_spi_add_device, NULL, ctlr, NULL); 229164bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 22928caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 229364bee4d2SMika Westerberg } 229464bee4d2SMika Westerberg #else 22958caab75fSGeert Uytterhoeven static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 229664bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 229764bee4d2SMika Westerberg 22988caab75fSGeert Uytterhoeven static void spi_controller_release(struct device *dev) 22998ae12a0dSDavid Brownell { 23008caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 23018ae12a0dSDavid Brownell 23028caab75fSGeert Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 23038caab75fSGeert Uytterhoeven kfree(ctlr); 23048ae12a0dSDavid Brownell } 23058ae12a0dSDavid Brownell 23068ae12a0dSDavid 
Brownell static struct class spi_master_class = { 23078ae12a0dSDavid Brownell .name = "spi_master", 23088ae12a0dSDavid Brownell .owner = THIS_MODULE, 23098caab75fSGeert Uytterhoeven .dev_release = spi_controller_release, 2310eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 23118ae12a0dSDavid Brownell }; 23128ae12a0dSDavid Brownell 23136c364062SGeert Uytterhoeven #ifdef CONFIG_SPI_SLAVE 23146c364062SGeert Uytterhoeven /** 23156c364062SGeert Uytterhoeven * spi_slave_abort - abort the ongoing transfer request on an SPI slave 23166c364062SGeert Uytterhoeven * controller 23176c364062SGeert Uytterhoeven * @spi: device used for the current transfer 23186c364062SGeert Uytterhoeven */ 23196c364062SGeert Uytterhoeven int spi_slave_abort(struct spi_device *spi) 23206c364062SGeert Uytterhoeven { 23218caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 23226c364062SGeert Uytterhoeven 23238caab75fSGeert Uytterhoeven if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 23248caab75fSGeert Uytterhoeven return ctlr->slave_abort(ctlr); 23256c364062SGeert Uytterhoeven 23266c364062SGeert Uytterhoeven return -ENOTSUPP; 23276c364062SGeert Uytterhoeven } 23286c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_slave_abort); 23296c364062SGeert Uytterhoeven 23306c364062SGeert Uytterhoeven static int match_true(struct device *dev, void *data) 23316c364062SGeert Uytterhoeven { 23326c364062SGeert Uytterhoeven return 1; 23336c364062SGeert Uytterhoeven } 23346c364062SGeert Uytterhoeven 2335cc8b4659SGeert Uytterhoeven static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2336cc8b4659SGeert Uytterhoeven char *buf) 23376c364062SGeert Uytterhoeven { 23388caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, struct spi_controller, 23398caab75fSGeert Uytterhoeven dev); 23406c364062SGeert Uytterhoeven struct device *child; 23416c364062SGeert Uytterhoeven 23426c364062SGeert Uytterhoeven child = device_find_child(&ctlr->dev, NULL, match_true); 23436c364062SGeert Uytterhoeven return sprintf(buf, "%s\n", 23446c364062SGeert Uytterhoeven child ? 
to_spi_device(child)->modalias : NULL); 23456c364062SGeert Uytterhoeven } 23466c364062SGeert Uytterhoeven 2347cc8b4659SGeert Uytterhoeven static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2348cc8b4659SGeert Uytterhoeven const char *buf, size_t count) 23496c364062SGeert Uytterhoeven { 23508caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, struct spi_controller, 23518caab75fSGeert Uytterhoeven dev); 23526c364062SGeert Uytterhoeven struct spi_device *spi; 23536c364062SGeert Uytterhoeven struct device *child; 23546c364062SGeert Uytterhoeven char name[32]; 23556c364062SGeert Uytterhoeven int rc; 23566c364062SGeert Uytterhoeven 23576c364062SGeert Uytterhoeven rc = sscanf(buf, "%31s", name); 23586c364062SGeert Uytterhoeven if (rc != 1 || !name[0]) 23596c364062SGeert Uytterhoeven return -EINVAL; 23606c364062SGeert Uytterhoeven 23616c364062SGeert Uytterhoeven child = device_find_child(&ctlr->dev, NULL, match_true); 23626c364062SGeert Uytterhoeven if (child) { 23636c364062SGeert Uytterhoeven /* Remove registered slave */ 23646c364062SGeert Uytterhoeven device_unregister(child); 23656c364062SGeert Uytterhoeven put_device(child); 23666c364062SGeert Uytterhoeven } 23676c364062SGeert Uytterhoeven 23686c364062SGeert Uytterhoeven if (strcmp(name, "(null)")) { 23696c364062SGeert Uytterhoeven /* Register new slave */ 23706c364062SGeert Uytterhoeven spi = spi_alloc_device(ctlr); 23716c364062SGeert Uytterhoeven if (!spi) 23726c364062SGeert Uytterhoeven return -ENOMEM; 23736c364062SGeert Uytterhoeven 23746c364062SGeert Uytterhoeven strlcpy(spi->modalias, name, sizeof(spi->modalias)); 23756c364062SGeert Uytterhoeven 23766c364062SGeert Uytterhoeven rc = spi_add_device(spi); 23776c364062SGeert Uytterhoeven if (rc) { 23786c364062SGeert Uytterhoeven spi_dev_put(spi); 23796c364062SGeert Uytterhoeven return rc; 23806c364062SGeert Uytterhoeven } 23816c364062SGeert Uytterhoeven } 23826c364062SGeert Uytterhoeven 23836c364062SGeert Uytterhoeven return count; 23846c364062SGeert Uytterhoeven } 23856c364062SGeert Uytterhoeven 2386cc8b4659SGeert Uytterhoeven static DEVICE_ATTR_RW(slave); 23876c364062SGeert Uytterhoeven 23886c364062SGeert Uytterhoeven static struct attribute *spi_slave_attrs[] = { 23896c364062SGeert Uytterhoeven &dev_attr_slave.attr, 23906c364062SGeert Uytterhoeven NULL, 23916c364062SGeert Uytterhoeven }; 23926c364062SGeert Uytterhoeven 23936c364062SGeert Uytterhoeven static const struct attribute_group spi_slave_group = { 23946c364062SGeert Uytterhoeven .attrs = spi_slave_attrs, 23956c364062SGeert Uytterhoeven }; 23966c364062SGeert Uytterhoeven 23976c364062SGeert Uytterhoeven static const struct attribute_group *spi_slave_groups[] = { 23988caab75fSGeert Uytterhoeven &spi_controller_statistics_group, 23996c364062SGeert Uytterhoeven &spi_slave_group, 24006c364062SGeert Uytterhoeven NULL, 24016c364062SGeert Uytterhoeven }; 24026c364062SGeert Uytterhoeven 24036c364062SGeert Uytterhoeven static struct class spi_slave_class = { 24046c364062SGeert Uytterhoeven .name = "spi_slave", 24056c364062SGeert Uytterhoeven .owner = THIS_MODULE, 24068caab75fSGeert Uytterhoeven .dev_release = spi_controller_release, 24076c364062SGeert Uytterhoeven .dev_groups = spi_slave_groups, 24086c364062SGeert Uytterhoeven }; 24096c364062SGeert Uytterhoeven #else 24106c364062SGeert Uytterhoeven extern struct class spi_slave_class; /* dummy */ 24116c364062SGeert Uytterhoeven #endif 24128ae12a0dSDavid Brownell 24138ae12a0dSDavid Brownell /** 24146c364062SGeert Uytterhoeven * 
__spi_alloc_controller - allocate an SPI master or slave controller 24158ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 241633e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 2417229e6af1SLukas Wunner * memory is in the driver_data field of the returned device, accessible 2418229e6af1SLukas Wunner * with spi_controller_get_devdata(); the memory is cacheline aligned; 2419229e6af1SLukas Wunner * drivers granting DMA access to portions of their private data need to 2420229e6af1SLukas Wunner * round up @size using ALIGN(size, dma_get_cache_alignment()). 24216c364062SGeert Uytterhoeven * @slave: flag indicating whether to allocate an SPI master (false) or SPI 24226c364062SGeert Uytterhoeven * slave (true) controller 242333e34dc6SDavid Brownell * Context: can sleep 24248ae12a0dSDavid Brownell * 24256c364062SGeert Uytterhoeven * This call is used only by SPI controller drivers, which are the 24268ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 24278caab75fSGeert Uytterhoeven * an spi_controller structure, prior to calling spi_register_controller(). 24288ae12a0dSDavid Brownell * 242997d56dc6SJavier Martinez Canillas * This must be called from context that can sleep. 24308ae12a0dSDavid Brownell * 24316c364062SGeert Uytterhoeven * The caller is responsible for assigning the bus number and initializing the 24328caab75fSGeert Uytterhoeven * controller's methods before calling spi_register_controller(); and (after 24338caab75fSGeert Uytterhoeven * errors adding the device) calling spi_controller_put() to prevent a memory 24348caab75fSGeert Uytterhoeven * leak. 243597d56dc6SJavier Martinez Canillas * 24366c364062SGeert Uytterhoeven * Return: the SPI controller structure on success, else NULL. 
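 *
 * A minimal allocation sketch, normally written via the spi_alloc_master()
 * or spi_alloc_slave() wrappers rather than by calling this helper
 * directly; the private struct "foo_priv" and the probe() context are
 * illustrative assumptions, not part of this API:
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);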
24378ae12a0dSDavid Brownell */ 24388caab75fSGeert Uytterhoeven struct spi_controller *__spi_alloc_controller(struct device *dev, 24396c364062SGeert Uytterhoeven unsigned int size, bool slave) 24408ae12a0dSDavid Brownell { 24418caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 2442229e6af1SLukas Wunner size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 24438ae12a0dSDavid Brownell 24440c868461SDavid Brownell if (!dev) 24450c868461SDavid Brownell return NULL; 24460c868461SDavid Brownell 2447229e6af1SLukas Wunner ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 24488caab75fSGeert Uytterhoeven if (!ctlr) 24498ae12a0dSDavid Brownell return NULL; 24508ae12a0dSDavid Brownell 24518caab75fSGeert Uytterhoeven device_initialize(&ctlr->dev); 24528caab75fSGeert Uytterhoeven ctlr->bus_num = -1; 24538caab75fSGeert Uytterhoeven ctlr->num_chipselect = 1; 24548caab75fSGeert Uytterhoeven ctlr->slave = slave; 24556c364062SGeert Uytterhoeven if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 24568caab75fSGeert Uytterhoeven ctlr->dev.class = &spi_slave_class; 24576c364062SGeert Uytterhoeven else 24588caab75fSGeert Uytterhoeven ctlr->dev.class = &spi_master_class; 24598caab75fSGeert Uytterhoeven ctlr->dev.parent = dev; 24608caab75fSGeert Uytterhoeven pm_suspend_ignore_children(&ctlr->dev, true); 2461229e6af1SLukas Wunner spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 24628ae12a0dSDavid Brownell 24638caab75fSGeert Uytterhoeven return ctlr; 24648ae12a0dSDavid Brownell } 24656c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(__spi_alloc_controller); 24668ae12a0dSDavid Brownell 24675e844cc3SLukas Wunner static void devm_spi_release_controller(struct device *dev, void *ctlr) 24685e844cc3SLukas Wunner { 24695e844cc3SLukas Wunner spi_controller_put(*(struct spi_controller **)ctlr); 24705e844cc3SLukas Wunner } 24715e844cc3SLukas Wunner 24725e844cc3SLukas Wunner /** 24735e844cc3SLukas Wunner * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 24745e844cc3SLukas Wunner * @dev: physical device of SPI controller 24755e844cc3SLukas Wunner * @size: how much zeroed driver-private data to allocate 24765e844cc3SLukas Wunner * @slave: whether to allocate an SPI master (false) or SPI slave (true) 24775e844cc3SLukas Wunner * Context: can sleep 24785e844cc3SLukas Wunner * 24795e844cc3SLukas Wunner * Allocate an SPI controller and automatically release a reference on it 24805e844cc3SLukas Wunner * when @dev is unbound from its driver. Drivers are thus relieved from 24815e844cc3SLukas Wunner * having to call spi_controller_put(). 24825e844cc3SLukas Wunner * 24835e844cc3SLukas Wunner * The arguments to this function are identical to __spi_alloc_controller(). 24845e844cc3SLukas Wunner * 24855e844cc3SLukas Wunner * Return: the SPI controller structure on success, else NULL. 24865e844cc3SLukas Wunner */ 24875e844cc3SLukas Wunner struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 24885e844cc3SLukas Wunner unsigned int size, 24895e844cc3SLukas Wunner bool slave) 24905e844cc3SLukas Wunner { 24915e844cc3SLukas Wunner struct spi_controller **ptr, *ctlr; 24925e844cc3SLukas Wunner 24935e844cc3SLukas Wunner ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 24945e844cc3SLukas Wunner GFP_KERNEL); 24955e844cc3SLukas Wunner if (!ptr) 24965e844cc3SLukas Wunner return NULL; 24975e844cc3SLukas Wunner 24985e844cc3SLukas Wunner ctlr = __spi_alloc_controller(dev, size, slave); 24995e844cc3SLukas Wunner if (ctlr) { 2500794aaf01SWilliam A. 
Kennington III ctlr->devm_allocated = true; 25015e844cc3SLukas Wunner *ptr = ctlr; 25025e844cc3SLukas Wunner devres_add(dev, ptr); 25035e844cc3SLukas Wunner } else { 25045e844cc3SLukas Wunner devres_free(ptr); 25055e844cc3SLukas Wunner } 25065e844cc3SLukas Wunner 25075e844cc3SLukas Wunner return ctlr; 25085e844cc3SLukas Wunner } 25095e844cc3SLukas Wunner EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 25105e844cc3SLukas Wunner 251174317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 251243004f31SLinus Walleij static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 251374317984SJean-Christophe PLAGNIOL-VILLARD { 2514e80beb27SGrant Likely int nb, i, *cs; 25158caab75fSGeert Uytterhoeven struct device_node *np = ctlr->dev.of_node; 251674317984SJean-Christophe PLAGNIOL-VILLARD 251774317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 251874317984SJean-Christophe PLAGNIOL-VILLARD return 0; 251974317984SJean-Christophe PLAGNIOL-VILLARD 252074317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 25218caab75fSGeert Uytterhoeven ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 252274317984SJean-Christophe PLAGNIOL-VILLARD 25238ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 25248ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 252574317984SJean-Christophe PLAGNIOL-VILLARD return 0; 25268ec5d84eSAndreas Larsson else if (nb < 0) 25278ec5d84eSAndreas Larsson return nb; 252874317984SJean-Christophe PLAGNIOL-VILLARD 2529a86854d0SKees Cook cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 253074317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 25318caab75fSGeert Uytterhoeven ctlr->cs_gpios = cs; 253274317984SJean-Christophe PLAGNIOL-VILLARD 25338caab75fSGeert Uytterhoeven if (!ctlr->cs_gpios) 253474317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 253574317984SJean-Christophe PLAGNIOL-VILLARD 25368caab75fSGeert Uytterhoeven for (i = 0; i < ctlr->num_chipselect; i++) 2537446411e1SAndreas Larsson cs[i] = -ENOENT; 253874317984SJean-Christophe PLAGNIOL-VILLARD 253974317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 254074317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = of_get_named_gpio(np, "cs-gpios", i); 254174317984SJean-Christophe PLAGNIOL-VILLARD 254274317984SJean-Christophe PLAGNIOL-VILLARD return 0; 254374317984SJean-Christophe PLAGNIOL-VILLARD } 254474317984SJean-Christophe PLAGNIOL-VILLARD #else 254543004f31SLinus Walleij static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 254674317984SJean-Christophe PLAGNIOL-VILLARD { 254774317984SJean-Christophe PLAGNIOL-VILLARD return 0; 254874317984SJean-Christophe PLAGNIOL-VILLARD } 254974317984SJean-Christophe PLAGNIOL-VILLARD #endif 255074317984SJean-Christophe PLAGNIOL-VILLARD 2551f3186dd8SLinus Walleij /** 2552f3186dd8SLinus Walleij * spi_get_gpio_descs() - grab chip select GPIOs for the master 2553f3186dd8SLinus Walleij * @ctlr: The SPI master to grab GPIO descriptors for 2554f3186dd8SLinus Walleij */ 2555f3186dd8SLinus Walleij static int spi_get_gpio_descs(struct spi_controller *ctlr) 2556f3186dd8SLinus Walleij { 2557f3186dd8SLinus Walleij int nb, i; 2558f3186dd8SLinus Walleij struct gpio_desc **cs; 2559f3186dd8SLinus Walleij struct device *dev = &ctlr->dev; 25607d93aecdSGeert Uytterhoeven unsigned long native_cs_mask = 0; 25617d93aecdSGeert Uytterhoeven unsigned int num_cs_gpios = 0; 2562f3186dd8SLinus Walleij 2563f3186dd8SLinus Walleij nb = gpiod_count(dev, "cs"); 2564f3186dd8SLinus Walleij 
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2565f3186dd8SLinus Walleij 2566f3186dd8SLinus Walleij /* No GPIOs at all is fine, else return the error */ 2567f3186dd8SLinus Walleij if (nb == 0 || nb == -ENOENT) 2568f3186dd8SLinus Walleij return 0; 2569f3186dd8SLinus Walleij else if (nb < 0) 2570f3186dd8SLinus Walleij return nb; 2571f3186dd8SLinus Walleij 2572f3186dd8SLinus Walleij cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2573f3186dd8SLinus Walleij GFP_KERNEL); 2574f3186dd8SLinus Walleij if (!cs) 2575f3186dd8SLinus Walleij return -ENOMEM; 2576f3186dd8SLinus Walleij ctlr->cs_gpiods = cs; 2577f3186dd8SLinus Walleij 2578f3186dd8SLinus Walleij for (i = 0; i < nb; i++) { 2579f3186dd8SLinus Walleij /* 2580f3186dd8SLinus Walleij * Most chipselects are active low, the inverted 2581f3186dd8SLinus Walleij * semantics are handled by special quirks in gpiolib, 2582f3186dd8SLinus Walleij * so initializing them GPIOD_OUT_LOW here means 2583f3186dd8SLinus Walleij * "unasserted", in most cases this will drive the physical 2584f3186dd8SLinus Walleij * line high. 2585f3186dd8SLinus Walleij */ 2586f3186dd8SLinus Walleij cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2587f3186dd8SLinus Walleij GPIOD_OUT_LOW); 25881723fdecSGeert Uytterhoeven if (IS_ERR(cs[i])) 25891723fdecSGeert Uytterhoeven return PTR_ERR(cs[i]); 2590f3186dd8SLinus Walleij 2591f3186dd8SLinus Walleij if (cs[i]) { 2592f3186dd8SLinus Walleij /* 2593f3186dd8SLinus Walleij * If we find a CS GPIO, name it after the device and 2594f3186dd8SLinus Walleij * chip select line. 2595f3186dd8SLinus Walleij */ 2596f3186dd8SLinus Walleij char *gpioname; 2597f3186dd8SLinus Walleij 2598f3186dd8SLinus Walleij gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2599f3186dd8SLinus Walleij dev_name(dev), i); 2600f3186dd8SLinus Walleij if (!gpioname) 2601f3186dd8SLinus Walleij return -ENOMEM; 2602f3186dd8SLinus Walleij gpiod_set_consumer_name(cs[i], gpioname); 26037d93aecdSGeert Uytterhoeven num_cs_gpios++; 26047d93aecdSGeert Uytterhoeven continue; 2605f3186dd8SLinus Walleij } 26067d93aecdSGeert Uytterhoeven 26077d93aecdSGeert Uytterhoeven if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 26087d93aecdSGeert Uytterhoeven dev_err(dev, "Invalid native chip select %d\n", i); 26097d93aecdSGeert Uytterhoeven return -EINVAL; 26107d93aecdSGeert Uytterhoeven } 26117d93aecdSGeert Uytterhoeven native_cs_mask |= BIT(i); 26127d93aecdSGeert Uytterhoeven } 26137d93aecdSGeert Uytterhoeven 26147d93aecdSGeert Uytterhoeven ctlr->unused_native_cs = ffz(native_cs_mask); 26157d93aecdSGeert Uytterhoeven if (num_cs_gpios && ctlr->max_native_cs && 26167d93aecdSGeert Uytterhoeven ctlr->unused_native_cs >= ctlr->max_native_cs) { 26177d93aecdSGeert Uytterhoeven dev_err(dev, "No unused native chip select available\n"); 26187d93aecdSGeert Uytterhoeven return -EINVAL; 2619f3186dd8SLinus Walleij } 2620f3186dd8SLinus Walleij 2621f3186dd8SLinus Walleij return 0; 2622f3186dd8SLinus Walleij } 2623f3186dd8SLinus Walleij 2624bdf3a3b5SBoris Brezillon static int spi_controller_check_ops(struct spi_controller *ctlr) 2625bdf3a3b5SBoris Brezillon { 2626bdf3a3b5SBoris Brezillon /* 2627b5932f5cSBoris Brezillon * The controller may implement only the high-level SPI-memory like 2628b5932f5cSBoris Brezillon * operations if it does not support regular SPI transfers, and this is 2629b5932f5cSBoris Brezillon * valid use case. 
2630b5932f5cSBoris Brezillon * If ->mem_ops is NULL, we request that at least one of the 2631b5932f5cSBoris Brezillon * ->transfer_xxx() method be implemented. 2632bdf3a3b5SBoris Brezillon */ 2633b5932f5cSBoris Brezillon if (ctlr->mem_ops) { 2634b5932f5cSBoris Brezillon if (!ctlr->mem_ops->exec_op) 2635bdf3a3b5SBoris Brezillon return -EINVAL; 2636b5932f5cSBoris Brezillon } else if (!ctlr->transfer && !ctlr->transfer_one && 2637b5932f5cSBoris Brezillon !ctlr->transfer_one_message) { 2638b5932f5cSBoris Brezillon return -EINVAL; 2639b5932f5cSBoris Brezillon } 2640bdf3a3b5SBoris Brezillon 2641bdf3a3b5SBoris Brezillon return 0; 2642bdf3a3b5SBoris Brezillon } 2643bdf3a3b5SBoris Brezillon 26448ae12a0dSDavid Brownell /** 26458caab75fSGeert Uytterhoeven * spi_register_controller - register SPI master or slave controller 26468caab75fSGeert Uytterhoeven * @ctlr: initialized master, originally from spi_alloc_master() or 26478caab75fSGeert Uytterhoeven * spi_alloc_slave() 264833e34dc6SDavid Brownell * Context: can sleep 26498ae12a0dSDavid Brownell * 26508caab75fSGeert Uytterhoeven * SPI controllers connect to their drivers using some non-SPI bus, 26518ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 26528caab75fSGeert Uytterhoeven * includes calling spi_register_controller() to hook up to this SPI bus glue. 26538ae12a0dSDavid Brownell * 26548ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 26558ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 26568ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 26578ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 26588ae12a0dSDavid Brownell * chip is at which address. 26598ae12a0dSDavid Brownell * 26608ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 26618caab75fSGeert Uytterhoeven * success, else a negative error code (dropping the controller's refcount). 26620c868461SDavid Brownell * After a successful return, the caller is responsible for calling 26638caab75fSGeert Uytterhoeven * spi_unregister_controller(). 266497d56dc6SJavier Martinez Canillas * 266597d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 26668ae12a0dSDavid Brownell */ 26678caab75fSGeert Uytterhoeven int spi_register_controller(struct spi_controller *ctlr) 26688ae12a0dSDavid Brownell { 26698caab75fSGeert Uytterhoeven struct device *dev = ctlr->dev.parent; 26702b9603a0SFeng Tang struct boardinfo *bi; 2671b93318a2SSergei Shtylyov int status; 267242bdd706SLucas Stach int id, first_dynamic; 26738ae12a0dSDavid Brownell 26740c868461SDavid Brownell if (!dev) 26750c868461SDavid Brownell return -ENODEV; 26760c868461SDavid Brownell 2677bdf3a3b5SBoris Brezillon /* 2678bdf3a3b5SBoris Brezillon * Make sure all necessary hooks are implemented before registering 2679bdf3a3b5SBoris Brezillon * the SPI controller. 
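 * That means either ->mem_ops together with ->mem_ops->exec_op, or at
 * least one of ->transfer, ->transfer_one or ->transfer_one_message,
 * as checked by spi_controller_check_ops().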
2680bdf3a3b5SBoris Brezillon */ 2681bdf3a3b5SBoris Brezillon status = spi_controller_check_ops(ctlr); 2682bdf3a3b5SBoris Brezillon if (status) 2683bdf3a3b5SBoris Brezillon return status; 2684bdf3a3b5SBoris Brezillon 268504b2d03aSGeert Uytterhoeven if (ctlr->bus_num >= 0) { 268604b2d03aSGeert Uytterhoeven /* devices with a fixed bus num must check-in with the num */ 268704b2d03aSGeert Uytterhoeven mutex_lock(&board_lock); 268804b2d03aSGeert Uytterhoeven id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 268904b2d03aSGeert Uytterhoeven ctlr->bus_num + 1, GFP_KERNEL); 269004b2d03aSGeert Uytterhoeven mutex_unlock(&board_lock); 269104b2d03aSGeert Uytterhoeven if (WARN(id < 0, "couldn't get idr")) 269204b2d03aSGeert Uytterhoeven return id == -ENOSPC ? -EBUSY : id; 269304b2d03aSGeert Uytterhoeven ctlr->bus_num = id; 269404b2d03aSGeert Uytterhoeven } else if (ctlr->dev.of_node) { 26959b61e302SSuniel Mahesh /* allocate dynamic bus number using Linux idr */ 26969b61e302SSuniel Mahesh id = of_alias_get_id(ctlr->dev.of_node, "spi"); 26979b61e302SSuniel Mahesh if (id >= 0) { 26989b61e302SSuniel Mahesh ctlr->bus_num = id; 26999b61e302SSuniel Mahesh mutex_lock(&board_lock); 27009b61e302SSuniel Mahesh id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 27019b61e302SSuniel Mahesh ctlr->bus_num + 1, GFP_KERNEL); 27029b61e302SSuniel Mahesh mutex_unlock(&board_lock); 27039b61e302SSuniel Mahesh if (WARN(id < 0, "couldn't get idr")) 27049b61e302SSuniel Mahesh return id == -ENOSPC ? -EBUSY : id; 27059b61e302SSuniel Mahesh } 27069b61e302SSuniel Mahesh } 27078caab75fSGeert Uytterhoeven if (ctlr->bus_num < 0) { 270842bdd706SLucas Stach first_dynamic = of_alias_get_highest_id("spi"); 270942bdd706SLucas Stach if (first_dynamic < 0) 271042bdd706SLucas Stach first_dynamic = 0; 271142bdd706SLucas Stach else 271242bdd706SLucas Stach first_dynamic++; 271342bdd706SLucas Stach 27149b61e302SSuniel Mahesh mutex_lock(&board_lock); 271542bdd706SLucas Stach id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 271642bdd706SLucas Stach 0, GFP_KERNEL); 27179b61e302SSuniel Mahesh mutex_unlock(&board_lock); 27189b61e302SSuniel Mahesh if (WARN(id < 0, "couldn't get idr")) 27199b61e302SSuniel Mahesh return id; 27209b61e302SSuniel Mahesh ctlr->bus_num = id; 27218ae12a0dSDavid Brownell } 27228caab75fSGeert Uytterhoeven INIT_LIST_HEAD(&ctlr->queue); 27238caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->queue_lock); 27248caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->bus_lock_spinlock); 27258caab75fSGeert Uytterhoeven mutex_init(&ctlr->bus_lock_mutex); 27268caab75fSGeert Uytterhoeven mutex_init(&ctlr->io_mutex); 27278caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 0; 27288caab75fSGeert Uytterhoeven init_completion(&ctlr->xfer_completion); 27298caab75fSGeert Uytterhoeven if (!ctlr->max_dma_len) 27308caab75fSGeert Uytterhoeven ctlr->max_dma_len = INT_MAX; 2731cf32b71eSErnst Schwab 27328ae12a0dSDavid Brownell /* register the device, then userspace will see it. 27338ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 
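 * The controller device is named "spi<bus_num>" by the dev_set_name()
 * call below, which is also the name it appears under in sysfs
 * (/sys/class/spi_master or /sys/class/spi_slave).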
27348ae12a0dSDavid Brownell */ 27358caab75fSGeert Uytterhoeven dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 27360a919ae4SAndrey Smirnov 27370a919ae4SAndrey Smirnov if (!spi_controller_is_slave(ctlr)) { 27380a919ae4SAndrey Smirnov if (ctlr->use_gpio_descriptors) { 27390a919ae4SAndrey Smirnov status = spi_get_gpio_descs(ctlr); 27400a919ae4SAndrey Smirnov if (status) 2741f9981d4fSAaro Koskinen goto free_bus_id; 27420a919ae4SAndrey Smirnov /* 27430a919ae4SAndrey Smirnov * A controller using GPIO descriptors always 27440a919ae4SAndrey Smirnov * supports SPI_CS_HIGH if need be. 27450a919ae4SAndrey Smirnov */ 27460a919ae4SAndrey Smirnov ctlr->mode_bits |= SPI_CS_HIGH; 27470a919ae4SAndrey Smirnov } else { 27480a919ae4SAndrey Smirnov /* Legacy code path for GPIOs from DT */ 274943004f31SLinus Walleij status = of_spi_get_gpio_numbers(ctlr); 27500a919ae4SAndrey Smirnov if (status) 2751f9981d4fSAaro Koskinen goto free_bus_id; 27520a919ae4SAndrey Smirnov } 27530a919ae4SAndrey Smirnov } 27540a919ae4SAndrey Smirnov 2755f9481b08STudor Ambarus /* 2756f9481b08STudor Ambarus * Even if it's just one always-selected device, there must 2757f9481b08STudor Ambarus * be at least one chipselect. 2758f9481b08STudor Ambarus */ 2759f9981d4fSAaro Koskinen if (!ctlr->num_chipselect) { 2760f9981d4fSAaro Koskinen status = -EINVAL; 2761f9981d4fSAaro Koskinen goto free_bus_id; 2762f9981d4fSAaro Koskinen } 2763f9481b08STudor Ambarus 27648caab75fSGeert Uytterhoeven status = device_add(&ctlr->dev); 2765f9981d4fSAaro Koskinen if (status < 0) 2766f9981d4fSAaro Koskinen goto free_bus_id; 27679b61e302SSuniel Mahesh dev_dbg(dev, "registered %s %s\n", 27688caab75fSGeert Uytterhoeven spi_controller_is_slave(ctlr) ? "slave" : "master", 27699b61e302SSuniel Mahesh dev_name(&ctlr->dev)); 27708ae12a0dSDavid Brownell 2771b5932f5cSBoris Brezillon /* 2772b5932f5cSBoris Brezillon * If we're using a queued driver, start the queue. Note that we don't 2773b5932f5cSBoris Brezillon * need the queueing logic if the driver is only supporting high-level 2774b5932f5cSBoris Brezillon * memory operations. 
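 * A controller that populates only ->mem_ops therefore skips both
 * branches below; spi_controller_check_ops() has already verified that
 * such a controller implements ->mem_ops->exec_op.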
2775b5932f5cSBoris Brezillon */ 2776b5932f5cSBoris Brezillon if (ctlr->transfer) { 27778caab75fSGeert Uytterhoeven dev_info(dev, "controller is unqueued, this is deprecated\n"); 2778b5932f5cSBoris Brezillon } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 27798caab75fSGeert Uytterhoeven status = spi_controller_initialize_queue(ctlr); 2780ffbbdd21SLinus Walleij if (status) { 27818caab75fSGeert Uytterhoeven device_del(&ctlr->dev); 2782f9981d4fSAaro Koskinen goto free_bus_id; 2783ffbbdd21SLinus Walleij } 2784ffbbdd21SLinus Walleij } 2785eca2ebc7SMartin Sperl /* add statistics */ 27868caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->statistics.lock); 2787ffbbdd21SLinus Walleij 27882b9603a0SFeng Tang mutex_lock(&board_lock); 27898caab75fSGeert Uytterhoeven list_add_tail(&ctlr->list, &spi_controller_list); 27902b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 27918caab75fSGeert Uytterhoeven spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 27922b9603a0SFeng Tang mutex_unlock(&board_lock); 27932b9603a0SFeng Tang 279464bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 27958caab75fSGeert Uytterhoeven of_register_spi_devices(ctlr); 27968caab75fSGeert Uytterhoeven acpi_register_spi_devices(ctlr); 2797f9981d4fSAaro Koskinen return status; 2798f9981d4fSAaro Koskinen 2799f9981d4fSAaro Koskinen free_bus_id: 2800f9981d4fSAaro Koskinen mutex_lock(&board_lock); 2801f9981d4fSAaro Koskinen idr_remove(&spi_master_idr, ctlr->bus_num); 2802f9981d4fSAaro Koskinen mutex_unlock(&board_lock); 28038ae12a0dSDavid Brownell return status; 28048ae12a0dSDavid Brownell } 28058caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_register_controller); 28068ae12a0dSDavid Brownell 2807666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 2808666d5b4cSMark Brown { 28098caab75fSGeert Uytterhoeven spi_unregister_controller(*(struct spi_controller **)res); 2810666d5b4cSMark Brown } 2811666d5b4cSMark Brown 2812666d5b4cSMark Brown /** 28138caab75fSGeert Uytterhoeven * devm_spi_register_controller - register managed SPI master or slave 28148caab75fSGeert Uytterhoeven * controller 28158caab75fSGeert Uytterhoeven * @dev: device managing SPI controller 28168caab75fSGeert Uytterhoeven * @ctlr: initialized controller, originally from spi_alloc_master() or 28178caab75fSGeert Uytterhoeven * spi_alloc_slave() 2818666d5b4cSMark Brown * Context: can sleep 2819666d5b4cSMark Brown * 28208caab75fSGeert Uytterhoeven * Register a SPI device as with spi_register_controller() which will 282168b892f1SJohan Hovold * automatically be unregistered and freed. 282297d56dc6SJavier Martinez Canillas * 282397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
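 *
 * A minimal probe() sketch; "pdev", "priv" and "foo_transfer_one" are
 * illustrative names only, and the controller is assumed to have been
 * allocated with devm_spi_alloc_master():
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	ctlr->num_chipselect = 1;
 *	ctlr->transfer_one = foo_transfer_one;
 *	return devm_spi_register_controller(&pdev->dev, ctlr);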
2824666d5b4cSMark Brown */ 28258caab75fSGeert Uytterhoeven int devm_spi_register_controller(struct device *dev, 28268caab75fSGeert Uytterhoeven struct spi_controller *ctlr) 2827666d5b4cSMark Brown { 28288caab75fSGeert Uytterhoeven struct spi_controller **ptr; 2829666d5b4cSMark Brown int ret; 2830666d5b4cSMark Brown 2831666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2832666d5b4cSMark Brown if (!ptr) 2833666d5b4cSMark Brown return -ENOMEM; 2834666d5b4cSMark Brown 28358caab75fSGeert Uytterhoeven ret = spi_register_controller(ctlr); 28364b92894eSStephen Warren if (!ret) { 28378caab75fSGeert Uytterhoeven *ptr = ctlr; 2838666d5b4cSMark Brown devres_add(dev, ptr); 2839666d5b4cSMark Brown } else { 2840666d5b4cSMark Brown devres_free(ptr); 2841666d5b4cSMark Brown } 2842666d5b4cSMark Brown 2843666d5b4cSMark Brown return ret; 2844666d5b4cSMark Brown } 28458caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2846666d5b4cSMark Brown 284734860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 28488ae12a0dSDavid Brownell { 28490c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 28508ae12a0dSDavid Brownell return 0; 28518ae12a0dSDavid Brownell } 28528ae12a0dSDavid Brownell 28538ae12a0dSDavid Brownell /** 28548caab75fSGeert Uytterhoeven * spi_unregister_controller - unregister SPI master or slave controller 28558caab75fSGeert Uytterhoeven * @ctlr: the controller being unregistered 285633e34dc6SDavid Brownell * Context: can sleep 28578ae12a0dSDavid Brownell * 28588caab75fSGeert Uytterhoeven * This call is used only by SPI controller drivers, which are the 28598ae12a0dSDavid Brownell * only ones directly touching chip registers. 28608ae12a0dSDavid Brownell * 28618ae12a0dSDavid Brownell * This must be called from context that can sleep. 286268b892f1SJohan Hovold * 286368b892f1SJohan Hovold * Note that this function also drops a reference to the controller. 28648ae12a0dSDavid Brownell */ 28658caab75fSGeert Uytterhoeven void spi_unregister_controller(struct spi_controller *ctlr) 28668ae12a0dSDavid Brownell { 28679b61e302SSuniel Mahesh struct spi_controller *found; 286867f7b278SJohan Hovold int id = ctlr->bus_num; 286989fc9a1aSJeff Garzik 2870ddf75be4SLukas Wunner /* Prevent addition of new devices, unregister existing ones */ 2871ddf75be4SLukas Wunner if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 2872ddf75be4SLukas Wunner mutex_lock(&spi_add_lock); 2873ddf75be4SLukas Wunner 287484855678SLukas Wunner device_for_each_child(&ctlr->dev, NULL, __unregister); 287584855678SLukas Wunner 28769b61e302SSuniel Mahesh /* First make sure that this controller was ever added */ 28779b61e302SSuniel Mahesh mutex_lock(&board_lock); 287867f7b278SJohan Hovold found = idr_find(&spi_master_idr, id); 28799b61e302SSuniel Mahesh mutex_unlock(&board_lock); 28808caab75fSGeert Uytterhoeven if (ctlr->queued) { 28818caab75fSGeert Uytterhoeven if (spi_destroy_queue(ctlr)) 28828caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue remove failed\n"); 2883ffbbdd21SLinus Walleij } 28842b9603a0SFeng Tang mutex_lock(&board_lock); 28858caab75fSGeert Uytterhoeven list_del(&ctlr->list); 28862b9603a0SFeng Tang mutex_unlock(&board_lock); 28872b9603a0SFeng Tang 28885e844cc3SLukas Wunner device_del(&ctlr->dev); 28895e844cc3SLukas Wunner 28905e844cc3SLukas Wunner /* Release the last reference on the controller if its driver 28915e844cc3SLukas Wunner * has not yet been converted to devm_spi_alloc_master/slave(). 28925e844cc3SLukas Wunner */ 2893794aaf01SWilliam A. 
Kennington III if (!ctlr->devm_allocated) 28945e844cc3SLukas Wunner put_device(&ctlr->dev); 28955e844cc3SLukas Wunner 28969b61e302SSuniel Mahesh /* free bus id */ 28979b61e302SSuniel Mahesh mutex_lock(&board_lock); 2898613bd1eaSJarkko Nikula if (found == ctlr) 289967f7b278SJohan Hovold idr_remove(&spi_master_idr, id); 29009b61e302SSuniel Mahesh mutex_unlock(&board_lock); 2901ddf75be4SLukas Wunner 2902ddf75be4SLukas Wunner if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 2903ddf75be4SLukas Wunner mutex_unlock(&spi_add_lock); 29048ae12a0dSDavid Brownell } 29058caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_controller); 29068ae12a0dSDavid Brownell 29078caab75fSGeert Uytterhoeven int spi_controller_suspend(struct spi_controller *ctlr) 2908ffbbdd21SLinus Walleij { 2909ffbbdd21SLinus Walleij int ret; 2910ffbbdd21SLinus Walleij 29118caab75fSGeert Uytterhoeven /* Basically no-ops for non-queued controllers */ 29128caab75fSGeert Uytterhoeven if (!ctlr->queued) 2913ffbbdd21SLinus Walleij return 0; 2914ffbbdd21SLinus Walleij 29158caab75fSGeert Uytterhoeven ret = spi_stop_queue(ctlr); 2916ffbbdd21SLinus Walleij if (ret) 29178caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue stop failed\n"); 2918ffbbdd21SLinus Walleij 2919ffbbdd21SLinus Walleij return ret; 2920ffbbdd21SLinus Walleij } 29218caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_suspend); 2922ffbbdd21SLinus Walleij 29238caab75fSGeert Uytterhoeven int spi_controller_resume(struct spi_controller *ctlr) 2924ffbbdd21SLinus Walleij { 2925ffbbdd21SLinus Walleij int ret; 2926ffbbdd21SLinus Walleij 29278caab75fSGeert Uytterhoeven if (!ctlr->queued) 2928ffbbdd21SLinus Walleij return 0; 2929ffbbdd21SLinus Walleij 29308caab75fSGeert Uytterhoeven ret = spi_start_queue(ctlr); 2931ffbbdd21SLinus Walleij if (ret) 29328caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue restart failed\n"); 2933ffbbdd21SLinus Walleij 2934ffbbdd21SLinus Walleij return ret; 2935ffbbdd21SLinus Walleij } 29368caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_resume); 2937ffbbdd21SLinus Walleij 29388caab75fSGeert Uytterhoeven static int __spi_controller_match(struct device *dev, const void *data) 29395ed2c832SDave Young { 29408caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 29419f3b795aSMichał Mirosław const u16 *bus_num = data; 29425ed2c832SDave Young 29438caab75fSGeert Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 29448caab75fSGeert Uytterhoeven return ctlr->bus_num == *bus_num; 29455ed2c832SDave Young } 29465ed2c832SDave Young 29478ae12a0dSDavid Brownell /** 29488ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 29498ae12a0dSDavid Brownell * @bus_num: the master's bus number 295033e34dc6SDavid Brownell * Context: can sleep 29518ae12a0dSDavid Brownell * 29528ae12a0dSDavid Brownell * This call may be used with devices that are registered after 29538ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 29548caab75fSGeert Uytterhoeven * spi_controller (which the caller must release), or NULL if there is 29558ae12a0dSDavid Brownell * no such master registered. 295697d56dc6SJavier Martinez Canillas * 295797d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 
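 *
 * A short lookup sketch (bus number 0 is just an example); the reference
 * obtained here is dropped with spi_controller_put():
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (!ctlr)
 *		return -ENODEV;
 *	...
 *	spi_controller_put(ctlr);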
29588ae12a0dSDavid Brownell */ 29598caab75fSGeert Uytterhoeven struct spi_controller *spi_busnum_to_master(u16 bus_num) 29608ae12a0dSDavid Brownell { 296149dce689STony Jones struct device *dev; 29628caab75fSGeert Uytterhoeven struct spi_controller *ctlr = NULL; 29638ae12a0dSDavid Brownell 2964695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 29658caab75fSGeert Uytterhoeven __spi_controller_match); 29665ed2c832SDave Young if (dev) 29678caab75fSGeert Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 29685ed2c832SDave Young /* reference got in class_find_device */ 29698caab75fSGeert Uytterhoeven return ctlr; 29708ae12a0dSDavid Brownell } 29718ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 29728ae12a0dSDavid Brownell 2973d780c371SMartin Sperl /*-------------------------------------------------------------------------*/ 2974d780c371SMartin Sperl 2975d780c371SMartin Sperl /* Core methods for SPI resource management */ 2976d780c371SMartin Sperl 2977d780c371SMartin Sperl /** 2978d780c371SMartin Sperl * spi_res_alloc - allocate a spi resource that is life-cycle managed 2979d780c371SMartin Sperl * during the processing of a spi_message while using 2980d780c371SMartin Sperl * spi_transfer_one 2981d780c371SMartin Sperl * @spi: the spi device for which we allocate memory 2982d780c371SMartin Sperl * @release: the release code to execute for this resource 2983d780c371SMartin Sperl * @size: size to alloc and return 2984d780c371SMartin Sperl * @gfp: GFP allocation flags 2985d780c371SMartin Sperl * 2986d780c371SMartin Sperl * Return: the pointer to the allocated data 2987d780c371SMartin Sperl * 2988d780c371SMartin Sperl * This may get enhanced in the future to allocate from a memory pool 29898caab75fSGeert Uytterhoeven * of the @spi_device or @spi_controller to avoid repeated allocations. 
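 *
 * A usage sketch; "foo_release" is a hypothetical spi_res_release_t
 * callback and the u32 payload is only an example:
 *
 *	u32 *val = spi_res_alloc(spi, foo_release, sizeof(*val), GFP_KERNEL);
 *
 *	if (!val)
 *		return -ENOMEM;
 *	*val = 42;
 *	spi_res_add(msg, val);
 *
 * The resource is then freed, and foo_release() invoked, when
 * spi_res_release() runs for the message.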
2990d780c371SMartin Sperl */ 2991d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi, 2992d780c371SMartin Sperl spi_res_release_t release, 2993d780c371SMartin Sperl size_t size, gfp_t gfp) 2994d780c371SMartin Sperl { 2995d780c371SMartin Sperl struct spi_res *sres; 2996d780c371SMartin Sperl 2997d780c371SMartin Sperl sres = kzalloc(sizeof(*sres) + size, gfp); 2998d780c371SMartin Sperl if (!sres) 2999d780c371SMartin Sperl return NULL; 3000d780c371SMartin Sperl 3001d780c371SMartin Sperl INIT_LIST_HEAD(&sres->entry); 3002d780c371SMartin Sperl sres->release = release; 3003d780c371SMartin Sperl 3004d780c371SMartin Sperl return sres->data; 3005d780c371SMartin Sperl } 3006d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc); 3007d780c371SMartin Sperl 3008d780c371SMartin Sperl /** 3009d780c371SMartin Sperl * spi_res_free - free an spi resource 3010d780c371SMartin Sperl * @res: pointer to the custom data of a resource 3011d780c371SMartin Sperl * 3012d780c371SMartin Sperl */ 3013d780c371SMartin Sperl void spi_res_free(void *res) 3014d780c371SMartin Sperl { 3015d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 3016d780c371SMartin Sperl 3017d780c371SMartin Sperl if (!res) 3018d780c371SMartin Sperl return; 3019d780c371SMartin Sperl 3020d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 3021d780c371SMartin Sperl kfree(sres); 3022d780c371SMartin Sperl } 3023d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free); 3024d780c371SMartin Sperl 3025d780c371SMartin Sperl /** 3026d780c371SMartin Sperl * spi_res_add - add a spi_res to the spi_message 3027d780c371SMartin Sperl * @message: the spi message 3028d780c371SMartin Sperl * @res: the spi_resource 3029d780c371SMartin Sperl */ 3030d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res) 3031d780c371SMartin Sperl { 3032d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 3033d780c371SMartin Sperl 3034d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 3035d780c371SMartin Sperl list_add_tail(&sres->entry, &message->resources); 3036d780c371SMartin Sperl } 3037d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add); 3038d780c371SMartin Sperl 3039d780c371SMartin Sperl /** 3040d780c371SMartin Sperl * spi_res_release - release all spi resources for this message 30418caab75fSGeert Uytterhoeven * @ctlr: the @spi_controller 3042d780c371SMartin Sperl * @message: the @spi_message 3043d780c371SMartin Sperl */ 30448caab75fSGeert Uytterhoeven void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 3045d780c371SMartin Sperl { 3046f5694369SVladimir Zapolskiy struct spi_res *res, *tmp; 3047d780c371SMartin Sperl 3048f5694369SVladimir Zapolskiy list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { 3049d780c371SMartin Sperl if (res->release) 30508caab75fSGeert Uytterhoeven res->release(ctlr, message, res->data); 3051d780c371SMartin Sperl 3052d780c371SMartin Sperl list_del(&res->entry); 3053d780c371SMartin Sperl 3054d780c371SMartin Sperl kfree(res); 3055d780c371SMartin Sperl } 3056d780c371SMartin Sperl } 3057d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release); 30588ae12a0dSDavid Brownell 30598ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 30608ae12a0dSDavid Brownell 3061523baf5aSMartin Sperl /* Core methods for spi_message alterations */ 3062523baf5aSMartin Sperl 30638caab75fSGeert Uytterhoeven static void __spi_replace_transfers_release(struct spi_controller *ctlr, 
3064523baf5aSMartin Sperl struct spi_message *msg, 3065523baf5aSMartin Sperl void *res) 3066523baf5aSMartin Sperl { 3067523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer = res; 3068523baf5aSMartin Sperl size_t i; 3069523baf5aSMartin Sperl 3070523baf5aSMartin Sperl /* call extra callback if requested */ 3071523baf5aSMartin Sperl if (rxfer->release) 30728caab75fSGeert Uytterhoeven rxfer->release(ctlr, msg, res); 3073523baf5aSMartin Sperl 3074523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 3075523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3076523baf5aSMartin Sperl 3077523baf5aSMartin Sperl /* remove the formerly inserted entries */ 3078523baf5aSMartin Sperl for (i = 0; i < rxfer->inserted; i++) 3079523baf5aSMartin Sperl list_del(&rxfer->inserted_transfers[i].transfer_list); 3080523baf5aSMartin Sperl } 3081523baf5aSMartin Sperl 3082523baf5aSMartin Sperl /** 3083523baf5aSMartin Sperl * spi_replace_transfers - replace transfers with several transfers 3084523baf5aSMartin Sperl * and register change with spi_message.resources 3085523baf5aSMartin Sperl * @msg: the spi_message we work upon 3086523baf5aSMartin Sperl * @xfer_first: the first spi_transfer we want to replace 3087523baf5aSMartin Sperl * @remove: number of transfers to remove 3088523baf5aSMartin Sperl * @insert: the number of transfers we want to insert instead 3089523baf5aSMartin Sperl * @release: extra release code necessary in some circumstances 3090523baf5aSMartin Sperl * @extradatasize: extra data to allocate (with alignment guarantees 3091523baf5aSMartin Sperl * of struct @spi_transfer) 309205885397SMartin Sperl * @gfp: gfp flags 3093523baf5aSMartin Sperl * 3094523baf5aSMartin Sperl * Returns: pointer to @spi_replaced_transfers, 3095523baf5aSMartin Sperl * PTR_ERR(...) in case of errors. 3096523baf5aSMartin Sperl */ 3097523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers( 3098523baf5aSMartin Sperl struct spi_message *msg, 3099523baf5aSMartin Sperl struct spi_transfer *xfer_first, 3100523baf5aSMartin Sperl size_t remove, 3101523baf5aSMartin Sperl size_t insert, 3102523baf5aSMartin Sperl spi_replaced_release_t release, 3103523baf5aSMartin Sperl size_t extradatasize, 3104523baf5aSMartin Sperl gfp_t gfp) 3105523baf5aSMartin Sperl { 3106523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer; 3107523baf5aSMartin Sperl struct spi_transfer *xfer; 3108523baf5aSMartin Sperl size_t i; 3109523baf5aSMartin Sperl 3110523baf5aSMartin Sperl /* allocate the structure using spi_res */ 3111523baf5aSMartin Sperl rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3112aef97522SGustavo A. R. 
Silva struct_size(rxfer, inserted_transfers, insert) 3113523baf5aSMartin Sperl + extradatasize, 3114523baf5aSMartin Sperl gfp); 3115523baf5aSMartin Sperl if (!rxfer) 3116523baf5aSMartin Sperl return ERR_PTR(-ENOMEM); 3117523baf5aSMartin Sperl 3118523baf5aSMartin Sperl /* the release code to invoke before running the generic release */ 3119523baf5aSMartin Sperl rxfer->release = release; 3120523baf5aSMartin Sperl 3121523baf5aSMartin Sperl /* assign extradata */ 3122523baf5aSMartin Sperl if (extradatasize) 3123523baf5aSMartin Sperl rxfer->extradata = 3124523baf5aSMartin Sperl &rxfer->inserted_transfers[insert]; 3125523baf5aSMartin Sperl 3126523baf5aSMartin Sperl /* init the replaced_transfers list */ 3127523baf5aSMartin Sperl INIT_LIST_HEAD(&rxfer->replaced_transfers); 3128523baf5aSMartin Sperl 3129523baf5aSMartin Sperl /* assign the list_entry after which we should reinsert 3130523baf5aSMartin Sperl * the @replaced_transfers - it may be spi_message.messages! 3131523baf5aSMartin Sperl */ 3132523baf5aSMartin Sperl rxfer->replaced_after = xfer_first->transfer_list.prev; 3133523baf5aSMartin Sperl 3134523baf5aSMartin Sperl /* remove the requested number of transfers */ 3135523baf5aSMartin Sperl for (i = 0; i < remove; i++) { 3136523baf5aSMartin Sperl /* if the entry after replaced_after it is msg->transfers 3137523baf5aSMartin Sperl * then we have been requested to remove more transfers 3138523baf5aSMartin Sperl * than are in the list 3139523baf5aSMartin Sperl */ 3140523baf5aSMartin Sperl if (rxfer->replaced_after->next == &msg->transfers) { 3141523baf5aSMartin Sperl dev_err(&msg->spi->dev, 3142523baf5aSMartin Sperl "requested to remove more spi_transfers than are available\n"); 3143523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 3144523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, 3145523baf5aSMartin Sperl rxfer->replaced_after); 3146523baf5aSMartin Sperl 3147523baf5aSMartin Sperl /* free the spi_replace_transfer structure */ 3148523baf5aSMartin Sperl spi_res_free(rxfer); 3149523baf5aSMartin Sperl 3150523baf5aSMartin Sperl /* and return with an error */ 3151523baf5aSMartin Sperl return ERR_PTR(-EINVAL); 3152523baf5aSMartin Sperl } 3153523baf5aSMartin Sperl 3154523baf5aSMartin Sperl /* remove the entry after replaced_after from list of 3155523baf5aSMartin Sperl * transfers and add it to list of replaced_transfers 3156523baf5aSMartin Sperl */ 3157523baf5aSMartin Sperl list_move_tail(rxfer->replaced_after->next, 3158523baf5aSMartin Sperl &rxfer->replaced_transfers); 3159523baf5aSMartin Sperl } 3160523baf5aSMartin Sperl 3161523baf5aSMartin Sperl /* create copy of the given xfer with identical settings 3162523baf5aSMartin Sperl * based on the first transfer to get removed 3163523baf5aSMartin Sperl */ 3164523baf5aSMartin Sperl for (i = 0; i < insert; i++) { 3165523baf5aSMartin Sperl /* we need to run in reverse order */ 3166523baf5aSMartin Sperl xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3167523baf5aSMartin Sperl 3168523baf5aSMartin Sperl /* copy all spi_transfer data */ 3169523baf5aSMartin Sperl memcpy(xfer, xfer_first, sizeof(*xfer)); 3170523baf5aSMartin Sperl 3171523baf5aSMartin Sperl /* add to list */ 3172523baf5aSMartin Sperl list_add(&xfer->transfer_list, rxfer->replaced_after); 3173523baf5aSMartin Sperl 3174bebcfd27SAlexandru Ardelean /* clear cs_change and delay for all but the last */ 3175523baf5aSMartin Sperl if (i) { 3176523baf5aSMartin Sperl xfer->cs_change = false; 3177523baf5aSMartin Sperl xfer->delay_usecs = 0; 3178bebcfd27SAlexandru 
Ardelean xfer->delay.value = 0; 3179523baf5aSMartin Sperl } 3180523baf5aSMartin Sperl } 3181523baf5aSMartin Sperl 3182523baf5aSMartin Sperl /* set up inserted */ 3183523baf5aSMartin Sperl rxfer->inserted = insert; 3184523baf5aSMartin Sperl 3185523baf5aSMartin Sperl /* and register it with spi_res/spi_message */ 3186523baf5aSMartin Sperl spi_res_add(msg, rxfer); 3187523baf5aSMartin Sperl 3188523baf5aSMartin Sperl return rxfer; 3189523baf5aSMartin Sperl } 3190523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers); 3191523baf5aSMartin Sperl 31928caab75fSGeert Uytterhoeven static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3193d9f12122SMartin Sperl struct spi_message *msg, 3194d9f12122SMartin Sperl struct spi_transfer **xferp, 3195d9f12122SMartin Sperl size_t maxsize, 3196d9f12122SMartin Sperl gfp_t gfp) 3197d9f12122SMartin Sperl { 3198d9f12122SMartin Sperl struct spi_transfer *xfer = *xferp, *xfers; 3199d9f12122SMartin Sperl struct spi_replaced_transfers *srt; 3200d9f12122SMartin Sperl size_t offset; 3201d9f12122SMartin Sperl size_t count, i; 3202d9f12122SMartin Sperl 3203d9f12122SMartin Sperl /* calculate how many we have to replace */ 3204d9f12122SMartin Sperl count = DIV_ROUND_UP(xfer->len, maxsize); 3205d9f12122SMartin Sperl 3206d9f12122SMartin Sperl /* create replacement */ 3207d9f12122SMartin Sperl srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3208657d32efSDan Carpenter if (IS_ERR(srt)) 3209657d32efSDan Carpenter return PTR_ERR(srt); 3210d9f12122SMartin Sperl xfers = srt->inserted_transfers; 3211d9f12122SMartin Sperl 3212d9f12122SMartin Sperl /* now handle each of those newly inserted spi_transfers 3213d9f12122SMartin Sperl * note that the replacements spi_transfers all are preset 3214d9f12122SMartin Sperl * to the same values as *xferp, so tx_buf, rx_buf and len 3215d9f12122SMartin Sperl * are all identical (as well as most others) 3216d9f12122SMartin Sperl * so we just have to fix up len and the pointers. 
3217d9f12122SMartin Sperl * 3218d9f12122SMartin Sperl * this also includes support for the depreciated 3219d9f12122SMartin Sperl * spi_message.is_dma_mapped interface 3220d9f12122SMartin Sperl */ 3221d9f12122SMartin Sperl 3222d9f12122SMartin Sperl /* the first transfer just needs the length modified, so we 3223d9f12122SMartin Sperl * run it outside the loop 3224d9f12122SMartin Sperl */ 3225c8dab77aSFabio Estevam xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3226d9f12122SMartin Sperl 3227d9f12122SMartin Sperl /* all the others need rx_buf/tx_buf also set */ 3228d9f12122SMartin Sperl for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3229d9f12122SMartin Sperl /* update rx_buf, tx_buf and dma */ 3230d9f12122SMartin Sperl if (xfers[i].rx_buf) 3231d9f12122SMartin Sperl xfers[i].rx_buf += offset; 3232d9f12122SMartin Sperl if (xfers[i].rx_dma) 3233d9f12122SMartin Sperl xfers[i].rx_dma += offset; 3234d9f12122SMartin Sperl if (xfers[i].tx_buf) 3235d9f12122SMartin Sperl xfers[i].tx_buf += offset; 3236d9f12122SMartin Sperl if (xfers[i].tx_dma) 3237d9f12122SMartin Sperl xfers[i].tx_dma += offset; 3238d9f12122SMartin Sperl 3239d9f12122SMartin Sperl /* update length */ 3240d9f12122SMartin Sperl xfers[i].len = min(maxsize, xfers[i].len - offset); 3241d9f12122SMartin Sperl } 3242d9f12122SMartin Sperl 3243d9f12122SMartin Sperl /* we set up xferp to the last entry we have inserted, 3244d9f12122SMartin Sperl * so that we skip those already split transfers 3245d9f12122SMartin Sperl */ 3246d9f12122SMartin Sperl *xferp = &xfers[count - 1]; 3247d9f12122SMartin Sperl 3248d9f12122SMartin Sperl /* increment statistics counters */ 32498caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3250d9f12122SMartin Sperl transfers_split_maxsize); 3251d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 3252d9f12122SMartin Sperl transfers_split_maxsize); 3253d9f12122SMartin Sperl 3254d9f12122SMartin Sperl return 0; 3255d9f12122SMartin Sperl } 3256d9f12122SMartin Sperl 3257d9f12122SMartin Sperl /** 3258ce2424d7SMauro Carvalho Chehab * spi_split_transfers_maxsize - split spi transfers into multiple transfers 3259d9f12122SMartin Sperl * when an individual transfer exceeds a 3260d9f12122SMartin Sperl * certain size 32618caab75fSGeert Uytterhoeven * @ctlr: the @spi_controller for this transfer 32623700ce95SMasanari Iida * @msg: the @spi_message to transform 32633700ce95SMasanari Iida * @maxsize: the maximum when to apply this 326410f11a22SJavier Martinez Canillas * @gfp: GFP allocation flags 3265d9f12122SMartin Sperl * 3266d9f12122SMartin Sperl * Return: status of transformation 3267d9f12122SMartin Sperl */ 32688caab75fSGeert Uytterhoeven int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3269d9f12122SMartin Sperl struct spi_message *msg, 3270d9f12122SMartin Sperl size_t maxsize, 3271d9f12122SMartin Sperl gfp_t gfp) 3272d9f12122SMartin Sperl { 3273d9f12122SMartin Sperl struct spi_transfer *xfer; 3274d9f12122SMartin Sperl int ret; 3275d9f12122SMartin Sperl 3276d9f12122SMartin Sperl /* iterate over the transfer_list, 3277d9f12122SMartin Sperl * but note that xfer is advanced to the last transfer inserted 3278d9f12122SMartin Sperl * to avoid checking sizes again unnecessarily (also xfer does 3279d9f12122SMartin Sperl * potentiall belong to a different list by the time the 3280d9f12122SMartin Sperl * replacement has happened 3281d9f12122SMartin Sperl */ 3282d9f12122SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) { 
3283d9f12122SMartin Sperl if (xfer->len > maxsize) { 32848caab75fSGeert Uytterhoeven ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 32858caab75fSGeert Uytterhoeven maxsize, gfp); 3286d9f12122SMartin Sperl if (ret) 3287d9f12122SMartin Sperl return ret; 3288d9f12122SMartin Sperl } 3289d9f12122SMartin Sperl } 3290d9f12122SMartin Sperl 3291d9f12122SMartin Sperl return 0; 3292d9f12122SMartin Sperl } 3293d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 32948ae12a0dSDavid Brownell 32958ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 32968ae12a0dSDavid Brownell 32978caab75fSGeert Uytterhoeven /* Core methods for SPI controller protocol drivers. Some of the 32987d077197SDavid Brownell * other core methods are currently defined as inline functions. 32997d077197SDavid Brownell */ 33007d077197SDavid Brownell 33018caab75fSGeert Uytterhoeven static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 33028caab75fSGeert Uytterhoeven u8 bits_per_word) 330363ab645fSStefan Brüns { 33048caab75fSGeert Uytterhoeven if (ctlr->bits_per_word_mask) { 330563ab645fSStefan Brüns /* Only 32 bits fit in the mask */ 330663ab645fSStefan Brüns if (bits_per_word > 32) 330763ab645fSStefan Brüns return -EINVAL; 33088caab75fSGeert Uytterhoeven if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 330963ab645fSStefan Brüns return -EINVAL; 331063ab645fSStefan Brüns } 331163ab645fSStefan Brüns 331263ab645fSStefan Brüns return 0; 331363ab645fSStefan Brüns } 331463ab645fSStefan Brüns 33157d077197SDavid Brownell /** 33167d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate 33177d077197SDavid Brownell * @spi: the device whose settings are being modified 33187d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device 33197d077197SDavid Brownell * 33207d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the 33217d077197SDavid Brownell * device doesn't work with its default. They may likewise need 33227d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function 33237d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep. 33247d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 33257d077197SDavid Brownell * effect the next time the device is selected and data is transferred to 33267d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected. 33277d077197SDavid Brownell * 33287d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option 33297d077197SDavid Brownell * that the underlying controller or its driver does not support. For 33307d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words, 33317d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects. 333297d56dc6SJavier Martinez Canillas * 333397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
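 *
 * A minimal usage sketch (illustrative only; the particular mode, word
 * size and clock rate below are assumptions, not recommendations):
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		dev_err(&spi->dev, "spi_setup() failed: %d\n", status);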
33347d077197SDavid Brownell */ 33357d077197SDavid Brownell int spi_setup(struct spi_device *spi) 33367d077197SDavid Brownell { 333783596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits; 33385ab8d262SAndy Shevchenko int status; 33397d077197SDavid Brownell 3340d962608cSDragos Bogdan /* 3341d962608cSDragos Bogdan * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO 3342d962608cSDragos Bogdan * from being set at the same time 3343f477b7fbSwangyuhang */ 3344d962608cSDragos Bogdan if ((hweight_long(spi->mode & 3345d962608cSDragos Bogdan (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3346d962608cSDragos Bogdan (hweight_long(spi->mode & 3347d962608cSDragos Bogdan (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3348f477b7fbSwangyuhang dev_err(&spi->dev, 3349d962608cSDragos Bogdan "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n"); 3350f477b7fbSwangyuhang return -EINVAL; 3351f477b7fbSwangyuhang } 3352f477b7fbSwangyuhang /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 3353f477b7fbSwangyuhang */ 3354f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode & 33556b03061fSYogesh Narayan Gaur (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 33566b03061fSYogesh Narayan Gaur SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3357f477b7fbSwangyuhang return -EINVAL; 3358e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options 33598caab75fSGeert Uytterhoeven * that aren't supported with their current controller. 3360cbaa62e0SDavid Lechner * SPI_CS_WORD has a fallback software implementation, 3361cbaa62e0SDavid Lechner * so it is ignored here. 3362e7db06b5SDavid Brownell */ 3363d962608cSDragos Bogdan bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3364d962608cSDragos Bogdan SPI_NO_TX | SPI_NO_RX); 3365d61ad23cSSerge Semin /* nothing prevents us from working with active-high CS in case it 3366d61ad23cSSerge Semin * is driven by GPIO.
3367d61ad23cSSerge Semin */ 3368d61ad23cSSerge Semin if (gpio_is_valid(spi->cs_gpio)) 3369d61ad23cSSerge Semin bad_bits &= ~SPI_CS_HIGH; 337083596fbeSGeert Uytterhoeven ugly_bits = bad_bits & 33716b03061fSYogesh Narayan Gaur (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 33726b03061fSYogesh Narayan Gaur SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 337383596fbeSGeert Uytterhoeven if (ugly_bits) { 337483596fbeSGeert Uytterhoeven dev_warn(&spi->dev, 337583596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n", 337683596fbeSGeert Uytterhoeven ugly_bits); 337783596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits; 337883596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits; 337983596fbeSGeert Uytterhoeven } 3380e7db06b5SDavid Brownell if (bad_bits) { 3381eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3382e7db06b5SDavid Brownell bad_bits); 3383e7db06b5SDavid Brownell return -EINVAL; 3384e7db06b5SDavid Brownell } 3385e7db06b5SDavid Brownell 33867d077197SDavid Brownell if (!spi->bits_per_word) 33877d077197SDavid Brownell spi->bits_per_word = 8; 33887d077197SDavid Brownell 33898caab75fSGeert Uytterhoeven status = __spi_validate_bits_per_word(spi->controller, 33908caab75fSGeert Uytterhoeven spi->bits_per_word); 33915ab8d262SAndy Shevchenko if (status) 33925ab8d262SAndy Shevchenko return status; 339363ab645fSStefan Brüns 33946820e812STudor Ambarus if (spi->controller->max_speed_hz && 33956820e812STudor Ambarus (!spi->max_speed_hz || 33966820e812STudor Ambarus spi->max_speed_hz > spi->controller->max_speed_hz)) 33978caab75fSGeert Uytterhoeven spi->max_speed_hz = spi->controller->max_speed_hz; 3398052eb2d4SAxel Lin 33994fae3a58SSerge Semin mutex_lock(&spi->controller->io_mutex); 34004fae3a58SSerge Semin 34018caab75fSGeert Uytterhoeven if (spi->controller->setup) 34028caab75fSGeert Uytterhoeven status = spi->controller->setup(spi); 34037d077197SDavid Brownell 3404d948e6caSLuhua Xu if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3405d948e6caSLuhua Xu status = pm_runtime_get_sync(spi->controller->dev.parent); 3406d948e6caSLuhua Xu if (status < 0) { 34074fae3a58SSerge Semin mutex_unlock(&spi->controller->io_mutex); 3408d948e6caSLuhua Xu pm_runtime_put_noidle(spi->controller->dev.parent); 3409d948e6caSLuhua Xu dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3410d948e6caSLuhua Xu status); 3411d948e6caSLuhua Xu return status; 3412d948e6caSLuhua Xu } 341357a94607STony Lindgren 341457a94607STony Lindgren /* 341557a94607STony Lindgren * We do not want to return positive value from pm_runtime_get, 341657a94607STony Lindgren * there are many instances of devices calling spi_setup() and 341757a94607STony Lindgren * checking for a non-zero return value instead of a negative 341857a94607STony Lindgren * return value. 
341957a94607STony Lindgren */ 342057a94607STony Lindgren status = 0; 342157a94607STony Lindgren 3422abeedb01SFranklin S Cooper Jr spi_set_cs(spi, false); 3423d948e6caSLuhua Xu pm_runtime_mark_last_busy(spi->controller->dev.parent); 3424d948e6caSLuhua Xu pm_runtime_put_autosuspend(spi->controller->dev.parent); 3425d948e6caSLuhua Xu } else { 3426d948e6caSLuhua Xu spi_set_cs(spi, false); 3427d948e6caSLuhua Xu } 3428abeedb01SFranklin S Cooper Jr 34294fae3a58SSerge Semin mutex_unlock(&spi->controller->io_mutex); 34304fae3a58SSerge Semin 3431924b5867SDouglas Anderson if (spi->rt && !spi->controller->rt) { 3432924b5867SDouglas Anderson spi->controller->rt = true; 3433924b5867SDouglas Anderson spi_set_thread_rt(spi->controller); 3434924b5867SDouglas Anderson } 3435924b5867SDouglas Anderson 34365fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 34377d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 34387d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 34397d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 34407d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 34417d077197SDavid Brownell (spi->mode & SPI_LOOP) ? "loopback, " : "", 34427d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 34437d077197SDavid Brownell status); 34447d077197SDavid Brownell 34457d077197SDavid Brownell return status; 34467d077197SDavid Brownell } 34477d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 34487d077197SDavid Brownell 3449f1ca9992SSowjanya Komatineni /** 3450f1ca9992SSowjanya Komatineni * spi_set_cs_timing - configure CS setup, hold, and inactive delays 3451f1ca9992SSowjanya Komatineni * @spi: the device that requires specific CS timing configuration 345281059366SAlexandru Ardelean * @setup: CS setup time specified via @spi_delay 345381059366SAlexandru Ardelean * @hold: CS hold time specified via @spi_delay 345481059366SAlexandru Ardelean * @inactive: CS inactive delay between transfers specified via @spi_delay 345581059366SAlexandru Ardelean * 345681059366SAlexandru Ardelean * Return: zero on success, else a negative error code. 
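 *
 * Illustrative sketch only (the delay values are assumptions; check the
 * timing requirements of the attached device):
 *
 *	struct spi_delay setup = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *
 *	status = spi_set_cs_timing(spi, &setup, &hold, NULL);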
3457f1ca9992SSowjanya Komatineni */ 345881059366SAlexandru Ardelean int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup, 345981059366SAlexandru Ardelean struct spi_delay *hold, struct spi_delay *inactive) 3460f1ca9992SSowjanya Komatineni { 34614cea6b8cSleilk.liu struct device *parent = spi->controller->dev.parent; 346225093bdeSAlexandru Ardelean size_t len; 34634cea6b8cSleilk.liu int status; 346425093bdeSAlexandru Ardelean 34650486d9f9Sleilk.liu if (spi->controller->set_cs_timing && 34660486d9f9Sleilk.liu !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) { 34674cea6b8cSleilk.liu if (spi->controller->auto_runtime_pm) { 34684cea6b8cSleilk.liu status = pm_runtime_get_sync(parent); 34694cea6b8cSleilk.liu if (status < 0) { 34704cea6b8cSleilk.liu pm_runtime_put_noidle(parent); 34714cea6b8cSleilk.liu dev_err(&spi->controller->dev, "Failed to power device: %d\n", 34724cea6b8cSleilk.liu status); 34734cea6b8cSleilk.liu return status; 34744cea6b8cSleilk.liu } 34754cea6b8cSleilk.liu 34764cea6b8cSleilk.liu status = spi->controller->set_cs_timing(spi, setup, 34774cea6b8cSleilk.liu hold, inactive); 34784cea6b8cSleilk.liu pm_runtime_mark_last_busy(parent); 34794cea6b8cSleilk.liu pm_runtime_put_autosuspend(parent); 34804cea6b8cSleilk.liu return status; 34814cea6b8cSleilk.liu } else { 348281059366SAlexandru Ardelean return spi->controller->set_cs_timing(spi, setup, hold, 348381059366SAlexandru Ardelean inactive); 34844cea6b8cSleilk.liu } 34854cea6b8cSleilk.liu } 348625093bdeSAlexandru Ardelean 348725093bdeSAlexandru Ardelean if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) || 348825093bdeSAlexandru Ardelean (hold && hold->unit == SPI_DELAY_UNIT_SCK) || 348925093bdeSAlexandru Ardelean (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) { 349025093bdeSAlexandru Ardelean dev_err(&spi->dev, 349125093bdeSAlexandru Ardelean "Clock-cycle delays for CS not supported in SW mode\n"); 349281059366SAlexandru Ardelean return -ENOTSUPP; 3493f1ca9992SSowjanya Komatineni } 349425093bdeSAlexandru Ardelean 349525093bdeSAlexandru Ardelean len = sizeof(struct spi_delay); 349625093bdeSAlexandru Ardelean 349725093bdeSAlexandru Ardelean /* copy delays to controller */ 349825093bdeSAlexandru Ardelean if (setup) 349925093bdeSAlexandru Ardelean memcpy(&spi->controller->cs_setup, setup, len); 350025093bdeSAlexandru Ardelean else 350125093bdeSAlexandru Ardelean memset(&spi->controller->cs_setup, 0, len); 350225093bdeSAlexandru Ardelean 350325093bdeSAlexandru Ardelean if (hold) 350425093bdeSAlexandru Ardelean memcpy(&spi->controller->cs_hold, hold, len); 350525093bdeSAlexandru Ardelean else 350625093bdeSAlexandru Ardelean memset(&spi->controller->cs_hold, 0, len); 350725093bdeSAlexandru Ardelean 350825093bdeSAlexandru Ardelean if (inactive) 350925093bdeSAlexandru Ardelean memcpy(&spi->controller->cs_inactive, inactive, len); 351025093bdeSAlexandru Ardelean else 351125093bdeSAlexandru Ardelean memset(&spi->controller->cs_inactive, 0, len); 351225093bdeSAlexandru Ardelean 351325093bdeSAlexandru Ardelean return 0; 3514f1ca9992SSowjanya Komatineni } 3515f1ca9992SSowjanya Komatineni EXPORT_SYMBOL_GPL(spi_set_cs_timing); 3516f1ca9992SSowjanya Komatineni 35176c613f68SAlexandru Ardelean static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 35186c613f68SAlexandru Ardelean struct spi_device *spi) 35196c613f68SAlexandru Ardelean { 35206c613f68SAlexandru Ardelean int delay1, delay2; 35216c613f68SAlexandru Ardelean 35223984d39bSAlexandru Ardelean delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 35236c613f68SAlexandru 
Ardelean if (delay1 < 0) 35246c613f68SAlexandru Ardelean return delay1; 35256c613f68SAlexandru Ardelean 35263984d39bSAlexandru Ardelean delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 35276c613f68SAlexandru Ardelean if (delay2 < 0) 35286c613f68SAlexandru Ardelean return delay2; 35296c613f68SAlexandru Ardelean 35306c613f68SAlexandru Ardelean if (delay1 < delay2) 35316c613f68SAlexandru Ardelean memcpy(&xfer->word_delay, &spi->word_delay, 35326c613f68SAlexandru Ardelean sizeof(xfer->word_delay)); 35336c613f68SAlexandru Ardelean 35346c613f68SAlexandru Ardelean return 0; 35356c613f68SAlexandru Ardelean } 35366c613f68SAlexandru Ardelean 353790808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3538cf32b71eSErnst Schwab { 35398caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3540e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 35416ea31293SAtsushi Nemoto int w_size; 3542cf32b71eSErnst Schwab 354324a0013aSMark Brown if (list_empty(&message->transfers)) 354424a0013aSMark Brown return -EINVAL; 354524a0013aSMark Brown 3546cbaa62e0SDavid Lechner /* If an SPI controller does not support toggling the CS line on each 354771388b21SDavid Lechner * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 354871388b21SDavid Lechner * for the CS line, we can emulate the CS-per-word hardware function by 3549cbaa62e0SDavid Lechner * splitting transfers into one-word transfers and ensuring that 3550cbaa62e0SDavid Lechner * cs_change is set for each transfer. 3551cbaa62e0SDavid Lechner */ 355271388b21SDavid Lechner if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3553f3186dd8SLinus Walleij spi->cs_gpiod || 355471388b21SDavid Lechner gpio_is_valid(spi->cs_gpio))) { 3555cbaa62e0SDavid Lechner size_t maxsize; 3556cbaa62e0SDavid Lechner int ret; 3557cbaa62e0SDavid Lechner 3558cbaa62e0SDavid Lechner maxsize = (spi->bits_per_word + 7) / 8; 3559cbaa62e0SDavid Lechner 3560cbaa62e0SDavid Lechner /* spi_split_transfers_maxsize() requires message->spi */ 3561cbaa62e0SDavid Lechner message->spi = spi; 3562cbaa62e0SDavid Lechner 3563cbaa62e0SDavid Lechner ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3564cbaa62e0SDavid Lechner GFP_KERNEL); 3565cbaa62e0SDavid Lechner if (ret) 3566cbaa62e0SDavid Lechner return ret; 3567cbaa62e0SDavid Lechner 3568cbaa62e0SDavid Lechner list_for_each_entry(xfer, &message->transfers, transfer_list) { 3569cbaa62e0SDavid Lechner /* don't change cs_change on the last entry in the list */ 3570cbaa62e0SDavid Lechner if (list_is_last(&xfer->transfer_list, &message->transfers)) 3571cbaa62e0SDavid Lechner break; 3572cbaa62e0SDavid Lechner xfer->cs_change = 1; 3573cbaa62e0SDavid Lechner } 3574cbaa62e0SDavid Lechner } 3575cbaa62e0SDavid Lechner 3576cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 3577cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 3578cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 3579cf32b71eSErnst Schwab * software limitations. 
3580cf32b71eSErnst Schwab */ 35818caab75fSGeert Uytterhoeven if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 35828caab75fSGeert Uytterhoeven (spi->mode & SPI_3WIRE)) { 35838caab75fSGeert Uytterhoeven unsigned flags = ctlr->flags; 3584cf32b71eSErnst Schwab 3585cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 3586cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 3587cf32b71eSErnst Schwab return -EINVAL; 35888caab75fSGeert Uytterhoeven if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3589cf32b71eSErnst Schwab return -EINVAL; 35908caab75fSGeert Uytterhoeven if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3591cf32b71eSErnst Schwab return -EINVAL; 3592cf32b71eSErnst Schwab } 3593cf32b71eSErnst Schwab } 3594cf32b71eSErnst Schwab 3595e6811d1dSLaxman Dewangan /** 3596059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 3597059b8ffeSLaxman Dewangan * it is not set for this transfer. 3598f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 3599f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3600b7bb367aSJonas Bonn * Ensure transfer word_delay is at least as long as that required by 3601b7bb367aSJonas Bonn * device itself. 3602e6811d1dSLaxman Dewangan */ 360377e80588SMartin Sperl message->frame_length = 0; 3604e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 36055d7e2b5eSMartin Sperl xfer->effective_speed_hz = 0; 3606078726ceSSourav Poddar message->frame_length += xfer->len; 3607e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 3608e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 3609a6f87fadSAxel Lin 3610a6f87fadSAxel Lin if (!xfer->speed_hz) 3611059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 3612a6f87fadSAxel Lin 36138caab75fSGeert Uytterhoeven if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 36148caab75fSGeert Uytterhoeven xfer->speed_hz = ctlr->max_speed_hz; 361556ede94aSGabor Juhos 36168caab75fSGeert Uytterhoeven if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3617543bb255SStephen Warren return -EINVAL; 3618a2fd4f9fSMark Brown 36194d94bd21SIvan T. Ivanov /* 36204d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 36214d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 36224d94bd21SIvan T. Ivanov */ 36234d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 36244d94bd21SIvan T. Ivanov w_size = 1; 36254d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 36264d94bd21SIvan T. Ivanov w_size = 2; 36274d94bd21SIvan T. Ivanov else 36284d94bd21SIvan T. Ivanov w_size = 4; 36294d94bd21SIvan T. Ivanov 36304d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 36316ea31293SAtsushi Nemoto if (xfer->len % w_size) 36324d94bd21SIvan T. Ivanov return -EINVAL; 36334d94bd21SIvan T. Ivanov 36348caab75fSGeert Uytterhoeven if (xfer->speed_hz && ctlr->min_speed_hz && 36358caab75fSGeert Uytterhoeven xfer->speed_hz < ctlr->min_speed_hz) 3636a2fd4f9fSMark Brown return -EINVAL; 3637f477b7fbSwangyuhang 3638f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 3639f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 3640f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 3641f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 3642f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 36431afd9989SGeert Uytterhoeven * 1. 
check the value matches one of single, dual and quad 36441afd9989SGeert Uytterhoeven * 2. check tx/rx_nbits match the mode in spi_device 3645f477b7fbSwangyuhang */ 3646db90a441SSourav Poddar if (xfer->tx_buf) { 3647d962608cSDragos Bogdan if (spi->mode & SPI_NO_TX) 3648d962608cSDragos Bogdan return -EINVAL; 3649f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3650f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 3651f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 3652a2fd4f9fSMark Brown return -EINVAL; 3653f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3654f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3655f477b7fbSwangyuhang return -EINVAL; 3656f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3657f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 3658f477b7fbSwangyuhang return -EINVAL; 3659db90a441SSourav Poddar } 3660f477b7fbSwangyuhang /* check transfer rx_nbits */ 3661db90a441SSourav Poddar if (xfer->rx_buf) { 3662d962608cSDragos Bogdan if (spi->mode & SPI_NO_RX) 3663d962608cSDragos Bogdan return -EINVAL; 3664f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3665f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 3666f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 3667f477b7fbSwangyuhang return -EINVAL; 3668f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3669f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3670f477b7fbSwangyuhang return -EINVAL; 3671f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3672f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 3673f477b7fbSwangyuhang return -EINVAL; 3674e6811d1dSLaxman Dewangan } 3675b7bb367aSJonas Bonn 36766c613f68SAlexandru Ardelean if (_spi_xfer_word_delay_update(xfer, spi)) 36776c613f68SAlexandru Ardelean return -EINVAL; 3678e6811d1dSLaxman Dewangan } 3679e6811d1dSLaxman Dewangan 3680cf32b71eSErnst Schwab message->status = -EINPROGRESS; 368190808738SMark Brown 368290808738SMark Brown return 0; 368390808738SMark Brown } 368490808738SMark Brown 368590808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 368690808738SMark Brown { 36878caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3688b42faeeeSVladimir Oltean struct spi_transfer *xfer; 368990808738SMark Brown 3690b5932f5cSBoris Brezillon /* 3691b5932f5cSBoris Brezillon * Some controllers do not support doing regular SPI transfers. Return 3692b5932f5cSBoris Brezillon * ENOTSUPP when this is the case. 
3693b5932f5cSBoris Brezillon */ 3694b5932f5cSBoris Brezillon if (!ctlr->transfer) 3695b5932f5cSBoris Brezillon return -ENOTSUPP; 3696b5932f5cSBoris Brezillon 369790808738SMark Brown message->spi = spi; 369890808738SMark Brown 36998caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3700eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3701eca2ebc7SMartin Sperl 370290808738SMark Brown trace_spi_message_submit(message); 370390808738SMark Brown 3704b42faeeeSVladimir Oltean if (!ctlr->ptp_sts_supported) { 3705b42faeeeSVladimir Oltean list_for_each_entry(xfer, &message->transfers, transfer_list) { 3706b42faeeeSVladimir Oltean xfer->ptp_sts_word_pre = 0; 3707b42faeeeSVladimir Oltean ptp_read_system_prets(xfer->ptp_sts); 3708b42faeeeSVladimir Oltean } 3709b42faeeeSVladimir Oltean } 3710b42faeeeSVladimir Oltean 37118caab75fSGeert Uytterhoeven return ctlr->transfer(spi, message); 3712cf32b71eSErnst Schwab } 3713cf32b71eSErnst Schwab 3714568d0697SDavid Brownell /** 3715568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 3716568d0697SDavid Brownell * @spi: device with which data will be exchanged 3717568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 3718568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 3719568d0697SDavid Brownell * 3720568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 3721568d0697SDavid Brownell * as well as from task contexts which can sleep. 3722568d0697SDavid Brownell * 3723568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 3724568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 3725568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 3726568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 3727568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 3728568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 3729568d0697SDavid Brownell * core or controller driver code. 3730568d0697SDavid Brownell * 3731568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 3732568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 3733568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 3734568d0697SDavid Brownell * time requirements, for example. 3735568d0697SDavid Brownell * 3736568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 3737568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 3738568d0697SDavid Brownell * Until returning from the associated message completion callback, 3739568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 3740568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 3741568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 374297d56dc6SJavier Martinez Canillas * 374397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
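 *
 * A hedged sketch of typical use (the completion object and callback
 * names below are assumptions, not taken from a real driver):
 *
 *	static void my_msg_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	message->complete = my_msg_complete;
 *	message->context = &done;
 *	status = spi_async(spi, message);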
3744568d0697SDavid Brownell */ 3745568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 3746568d0697SDavid Brownell { 37478caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3748cf32b71eSErnst Schwab int ret; 3749cf32b71eSErnst Schwab unsigned long flags; 3750568d0697SDavid Brownell 375190808738SMark Brown ret = __spi_validate(spi, message); 375290808738SMark Brown if (ret != 0) 375390808738SMark Brown return ret; 375490808738SMark Brown 37558caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3756568d0697SDavid Brownell 37578caab75fSGeert Uytterhoeven if (ctlr->bus_lock_flag) 3758cf32b71eSErnst Schwab ret = -EBUSY; 3759cf32b71eSErnst Schwab else 3760cf32b71eSErnst Schwab ret = __spi_async(spi, message); 3761568d0697SDavid Brownell 37628caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3763cf32b71eSErnst Schwab 3764cf32b71eSErnst Schwab return ret; 3765568d0697SDavid Brownell } 3766568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 3767568d0697SDavid Brownell 3768cf32b71eSErnst Schwab /** 3769cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 3770cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 3771cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 3772cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 3773cf32b71eSErnst Schwab * 3774cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 3775cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 3776cf32b71eSErnst Schwab * 3777cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 3778cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 3779cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 3780cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 3781cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 3782cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 3783cf32b71eSErnst Schwab * core or controller driver code. 3784cf32b71eSErnst Schwab * 3785cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 3786cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 3787cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 3788cf32b71eSErnst Schwab * time requirements, for example. 3789cf32b71eSErnst Schwab * 3790cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 3791cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 3792cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 3793cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 3794cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 3795cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 379697d56dc6SJavier Martinez Canillas * 379797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
3798cf32b71eSErnst Schwab */ 3799cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3800cf32b71eSErnst Schwab { 38018caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3802cf32b71eSErnst Schwab int ret; 3803cf32b71eSErnst Schwab unsigned long flags; 3804cf32b71eSErnst Schwab 380590808738SMark Brown ret = __spi_validate(spi, message); 380690808738SMark Brown if (ret != 0) 380790808738SMark Brown return ret; 380890808738SMark Brown 38098caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3810cf32b71eSErnst Schwab 3811cf32b71eSErnst Schwab ret = __spi_async(spi, message); 3812cf32b71eSErnst Schwab 38138caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3814cf32b71eSErnst Schwab 3815cf32b71eSErnst Schwab return ret; 3816cf32b71eSErnst Schwab 3817cf32b71eSErnst Schwab } 3818cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 3819cf32b71eSErnst Schwab 38207d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 38217d077197SDavid Brownell 38228caab75fSGeert Uytterhoeven /* Utility methods for SPI protocol drivers, layered on 38237d077197SDavid Brownell * top of the core. Some other utility methods are defined as 38247d077197SDavid Brownell * inline functions. 38257d077197SDavid Brownell */ 38267d077197SDavid Brownell 38275d870c8eSAndrew Morton static void spi_complete(void *arg) 38285d870c8eSAndrew Morton { 38295d870c8eSAndrew Morton complete(arg); 38305d870c8eSAndrew Morton } 38315d870c8eSAndrew Morton 3832ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3833cf32b71eSErnst Schwab { 3834cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 3835cf32b71eSErnst Schwab int status; 38368caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 38370461a414SMark Brown unsigned long flags; 38380461a414SMark Brown 38390461a414SMark Brown status = __spi_validate(spi, message); 38400461a414SMark Brown if (status != 0) 38410461a414SMark Brown return status; 3842cf32b71eSErnst Schwab 3843cf32b71eSErnst Schwab message->complete = spi_complete; 3844cf32b71eSErnst Schwab message->context = &done; 38450461a414SMark Brown message->spi = spi; 3846cf32b71eSErnst Schwab 38478caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3848eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3849eca2ebc7SMartin Sperl 38500461a414SMark Brown /* If we're not using the legacy transfer method then we will 38510461a414SMark Brown * try to transfer in the calling context so special case. 38520461a414SMark Brown * This code would be less tricky if we could remove the 38530461a414SMark Brown * support for driver implemented message queues. 
38540461a414SMark Brown */ 38558caab75fSGeert Uytterhoeven if (ctlr->transfer == spi_queued_transfer) { 38568caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 38570461a414SMark Brown 38580461a414SMark Brown trace_spi_message_submit(message); 38590461a414SMark Brown 38600461a414SMark Brown status = __spi_queued_transfer(spi, message, false); 38610461a414SMark Brown 38628caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 38630461a414SMark Brown } else { 3864cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 38650461a414SMark Brown } 3866cf32b71eSErnst Schwab 3867cf32b71eSErnst Schwab if (status == 0) { 38680461a414SMark Brown /* Push out the messages in the calling context if we 38690461a414SMark Brown * can. 38700461a414SMark Brown */ 38718caab75fSGeert Uytterhoeven if (ctlr->transfer == spi_queued_transfer) { 38728caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3873eca2ebc7SMartin Sperl spi_sync_immediate); 3874eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3875eca2ebc7SMartin Sperl spi_sync_immediate); 38768caab75fSGeert Uytterhoeven __spi_pump_messages(ctlr, false); 3877eca2ebc7SMartin Sperl } 38780461a414SMark Brown 3879cf32b71eSErnst Schwab wait_for_completion(&done); 3880cf32b71eSErnst Schwab status = message->status; 3881cf32b71eSErnst Schwab } 3882cf32b71eSErnst Schwab message->context = NULL; 3883cf32b71eSErnst Schwab return status; 3884cf32b71eSErnst Schwab } 3885cf32b71eSErnst Schwab 38868ae12a0dSDavid Brownell /** 38878ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 38888ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 38898ae12a0dSDavid Brownell * @message: describes the data transfers 389033e34dc6SDavid Brownell * Context: can sleep 38918ae12a0dSDavid Brownell * 38928ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 38938ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. Low-overhead controller 38948ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 38958ae12a0dSDavid Brownell * 38968ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 38978ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 38988ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 38998ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 39008ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 39018ae12a0dSDavid Brownell * 39020c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 39030c868461SDavid Brownell * message will not be freed before this call returns. 39040c868461SDavid Brownell * 390597d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
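 *
 * A small illustrative sketch (the tx/rx buffers and their sizes are
 * assumptions):
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = sizeof(tx),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	status = spi_sync(spi, &msg);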
39068ae12a0dSDavid Brownell */ 39078ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 39088ae12a0dSDavid Brownell { 3909ef4d96ecSMark Brown int ret; 3910ef4d96ecSMark Brown 39118caab75fSGeert Uytterhoeven mutex_lock(&spi->controller->bus_lock_mutex); 3912ef4d96ecSMark Brown ret = __spi_sync(spi, message); 39138caab75fSGeert Uytterhoeven mutex_unlock(&spi->controller->bus_lock_mutex); 3914ef4d96ecSMark Brown 3915ef4d96ecSMark Brown return ret; 39168ae12a0dSDavid Brownell } 39178ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 39188ae12a0dSDavid Brownell 3919cf32b71eSErnst Schwab /** 3920cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 3921cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 3922cf32b71eSErnst Schwab * @message: describes the data transfers 3923cf32b71eSErnst Schwab * Context: can sleep 3924cf32b71eSErnst Schwab * 3925cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3926cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 3927cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 3928cf32b71eSErnst Schwab * 3929cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 393025985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 3931cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 3932cf32b71eSErnst Schwab * 393397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 3934cf32b71eSErnst Schwab */ 3935cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 3936cf32b71eSErnst Schwab { 3937ef4d96ecSMark Brown return __spi_sync(spi, message); 3938cf32b71eSErnst Schwab } 3939cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 3940cf32b71eSErnst Schwab 3941cf32b71eSErnst Schwab /** 3942cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 39438caab75fSGeert Uytterhoeven * @ctlr: SPI bus master that should be locked for exclusive bus access 3944cf32b71eSErnst Schwab * Context: can sleep 3945cf32b71eSErnst Schwab * 3946cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3947cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 3948cf32b71eSErnst Schwab * 3949cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 3950cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 3951cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 3952cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 3953cf32b71eSErnst Schwab * 395497d56dc6SJavier Martinez Canillas * Return: always zero. 
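 *
 * A sketch of the intended calling sequence (msg_a and msg_b are assumed
 * to be already prepared spi_messages):
 *
 *	spi_bus_lock(spi->controller);
 *	spi_sync_locked(spi, &msg_a);
 *	spi_sync_locked(spi, &msg_b);
 *	spi_bus_unlock(spi->controller);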
3955cf32b71eSErnst Schwab */ 39568caab75fSGeert Uytterhoeven int spi_bus_lock(struct spi_controller *ctlr) 3957cf32b71eSErnst Schwab { 3958cf32b71eSErnst Schwab unsigned long flags; 3959cf32b71eSErnst Schwab 39608caab75fSGeert Uytterhoeven mutex_lock(&ctlr->bus_lock_mutex); 3961cf32b71eSErnst Schwab 39628caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 39638caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 1; 39648caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3965cf32b71eSErnst Schwab 3966cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 3967cf32b71eSErnst Schwab 3968cf32b71eSErnst Schwab return 0; 3969cf32b71eSErnst Schwab } 3970cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 3971cf32b71eSErnst Schwab 3972cf32b71eSErnst Schwab /** 3973cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 39748caab75fSGeert Uytterhoeven * @ctlr: SPI bus master that was locked for exclusive bus access 3975cf32b71eSErnst Schwab * Context: can sleep 3976cf32b71eSErnst Schwab * 3977cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3978cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 3979cf32b71eSErnst Schwab * 3980cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3981cf32b71eSErnst Schwab * call. 3982cf32b71eSErnst Schwab * 398397d56dc6SJavier Martinez Canillas * Return: always zero. 3984cf32b71eSErnst Schwab */ 39858caab75fSGeert Uytterhoeven int spi_bus_unlock(struct spi_controller *ctlr) 3986cf32b71eSErnst Schwab { 39878caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 0; 3988cf32b71eSErnst Schwab 39898caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->bus_lock_mutex); 3990cf32b71eSErnst Schwab 3991cf32b71eSErnst Schwab return 0; 3992cf32b71eSErnst Schwab } 3993cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 3994cf32b71eSErnst Schwab 3995a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 3996a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 39978ae12a0dSDavid Brownell 39988ae12a0dSDavid Brownell static u8 *buf; 39998ae12a0dSDavid Brownell 40008ae12a0dSDavid Brownell /** 40018ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 40028ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 40038ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 40048ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 400527570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 400627570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 400733e34dc6SDavid Brownell * Context: can sleep 40088ae12a0dSDavid Brownell * 40098ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 40108ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 40118ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 4012b885244eSDavid Brownell * This call may only be used from a context that may sleep. 40138ae12a0dSDavid Brownell * 4014c373643bSMark Brown * Parameters to this routine are always copied using a small buffer. 401533e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 40160c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 
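 *
 * For example, a one-byte command followed by a one-byte response could be
 * issued as below (the 0x0f opcode is purely an assumption):
 *
 *	u8 cmd = 0x0f;
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);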
401797d56dc6SJavier Martinez Canillas * 401897d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 40198ae12a0dSDavid Brownell */ 40208ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 40210c4a1590SMark Brown const void *txbuf, unsigned n_tx, 40220c4a1590SMark Brown void *rxbuf, unsigned n_rx) 40238ae12a0dSDavid Brownell { 4024068f4070SDavid Brownell static DEFINE_MUTEX(lock); 40258ae12a0dSDavid Brownell 40268ae12a0dSDavid Brownell int status; 40278ae12a0dSDavid Brownell struct spi_message message; 4028bdff549eSDavid Brownell struct spi_transfer x[2]; 40298ae12a0dSDavid Brownell u8 *local_buf; 40308ae12a0dSDavid Brownell 4031b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 4032b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 4033b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 4034b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 40358ae12a0dSDavid Brownell */ 4036b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 40372cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 40382cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 4039b3a223eeSMark Brown if (!local_buf) 4040b3a223eeSMark Brown return -ENOMEM; 4041b3a223eeSMark Brown } else { 4042b3a223eeSMark Brown local_buf = buf; 4043b3a223eeSMark Brown } 40448ae12a0dSDavid Brownell 40458275c642SVitaly Wool spi_message_init(&message); 40465fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 4047bdff549eSDavid Brownell if (n_tx) { 4048bdff549eSDavid Brownell x[0].len = n_tx; 4049bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 4050bdff549eSDavid Brownell } 4051bdff549eSDavid Brownell if (n_rx) { 4052bdff549eSDavid Brownell x[1].len = n_rx; 4053bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 4054bdff549eSDavid Brownell } 40558275c642SVitaly Wool 40568ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 4057bdff549eSDavid Brownell x[0].tx_buf = local_buf; 4058bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 40598ae12a0dSDavid Brownell 40608ae12a0dSDavid Brownell /* do the i/o */ 40618ae12a0dSDavid Brownell status = spi_sync(spi, &message); 40629b938b74SMarc Pignat if (status == 0) 4063bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 40648ae12a0dSDavid Brownell 4065bdff549eSDavid Brownell if (x[0].tx_buf == buf) 4066068f4070SDavid Brownell mutex_unlock(&lock); 40678ae12a0dSDavid Brownell else 40688ae12a0dSDavid Brownell kfree(local_buf); 40698ae12a0dSDavid Brownell 40708ae12a0dSDavid Brownell return status; 40718ae12a0dSDavid Brownell } 40728ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 40738ae12a0dSDavid Brownell 40748ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 40758ae12a0dSDavid Brownell 40765f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF) 4077ce79d54aSPantelis Antoniou /* must call put_device() when done with returned spi_device device */ 40785f143af7SMarco Felsch struct spi_device *of_find_spi_device_by_node(struct device_node *node) 4079ce79d54aSPantelis Antoniou { 4080cfba5de9SSuzuki K Poulose struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); 4081cfba5de9SSuzuki K Poulose 4082ce79d54aSPantelis Antoniou return dev ? 
to_spi_device(dev) : NULL; 4083ce79d54aSPantelis Antoniou } 40845f143af7SMarco Felsch EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); 40855f143af7SMarco Felsch #endif /* IS_ENABLED(CONFIG_OF) */ 4086ce79d54aSPantelis Antoniou 40875f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF_DYNAMIC) 40888caab75fSGeert Uytterhoeven /* the spi controllers are not using spi_bus, so we find it with another way */ 40898caab75fSGeert Uytterhoeven static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 4090ce79d54aSPantelis Antoniou { 4091ce79d54aSPantelis Antoniou struct device *dev; 4092ce79d54aSPantelis Antoniou 4093cfba5de9SSuzuki K Poulose dev = class_find_device_by_of_node(&spi_master_class, node); 40946c364062SGeert Uytterhoeven if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 4095cfba5de9SSuzuki K Poulose dev = class_find_device_by_of_node(&spi_slave_class, node); 4096ce79d54aSPantelis Antoniou if (!dev) 4097ce79d54aSPantelis Antoniou return NULL; 4098ce79d54aSPantelis Antoniou 4099ce79d54aSPantelis Antoniou /* reference got in class_find_device */ 41008caab75fSGeert Uytterhoeven return container_of(dev, struct spi_controller, dev); 4101ce79d54aSPantelis Antoniou } 4102ce79d54aSPantelis Antoniou 4103ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action, 4104ce79d54aSPantelis Antoniou void *arg) 4105ce79d54aSPantelis Antoniou { 4106ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 41078caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 4108ce79d54aSPantelis Antoniou struct spi_device *spi; 4109ce79d54aSPantelis Antoniou 4110ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 4111ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 41128caab75fSGeert Uytterhoeven ctlr = of_find_spi_controller_by_node(rd->dn->parent); 41138caab75fSGeert Uytterhoeven if (ctlr == NULL) 4114ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 4115ce79d54aSPantelis Antoniou 4116bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 41178caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 4118bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 4119bd6c1644SGeert Uytterhoeven } 4120bd6c1644SGeert Uytterhoeven 41218caab75fSGeert Uytterhoeven spi = of_register_spi_device(ctlr, rd->dn); 41228caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 4123ce79d54aSPantelis Antoniou 4124ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 412525c56c88SRob Herring pr_err("%s: failed to create for '%pOF'\n", 412625c56c88SRob Herring __func__, rd->dn); 4127e0af98a7SRalf Ramsauer of_node_clear_flag(rd->dn, OF_POPULATED); 4128ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 4129ce79d54aSPantelis Antoniou } 4130ce79d54aSPantelis Antoniou break; 4131ce79d54aSPantelis Antoniou 4132ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 4133bd6c1644SGeert Uytterhoeven /* already depopulated? */ 4134bd6c1644SGeert Uytterhoeven if (!of_node_check_flag(rd->dn, OF_POPULATED)) 4135bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 4136bd6c1644SGeert Uytterhoeven 4137ce79d54aSPantelis Antoniou /* find our device by node */ 4138ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 4139ce79d54aSPantelis Antoniou if (spi == NULL) 4140ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? 
not meant for us */ 4141ce79d54aSPantelis Antoniou 4142ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 4143ce79d54aSPantelis Antoniou spi_unregister_device(spi); 4144ce79d54aSPantelis Antoniou 4145ce79d54aSPantelis Antoniou /* and put the reference of the find */ 4146ce79d54aSPantelis Antoniou put_device(&spi->dev); 4147ce79d54aSPantelis Antoniou break; 4148ce79d54aSPantelis Antoniou } 4149ce79d54aSPantelis Antoniou 4150ce79d54aSPantelis Antoniou return NOTIFY_OK; 4151ce79d54aSPantelis Antoniou } 4152ce79d54aSPantelis Antoniou 4153ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 4154ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 4155ce79d54aSPantelis Antoniou }; 4156ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4157ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 4158ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4159ce79d54aSPantelis Antoniou 41607f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI) 41618caab75fSGeert Uytterhoeven static int spi_acpi_controller_match(struct device *dev, const void *data) 41627f24467fSOctavian Purdila { 41637f24467fSOctavian Purdila return ACPI_COMPANION(dev->parent) == data; 41647f24467fSOctavian Purdila } 41657f24467fSOctavian Purdila 41668caab75fSGeert Uytterhoeven static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 41677f24467fSOctavian Purdila { 41687f24467fSOctavian Purdila struct device *dev; 41697f24467fSOctavian Purdila 41707f24467fSOctavian Purdila dev = class_find_device(&spi_master_class, NULL, adev, 41718caab75fSGeert Uytterhoeven spi_acpi_controller_match); 41726c364062SGeert Uytterhoeven if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 41736c364062SGeert Uytterhoeven dev = class_find_device(&spi_slave_class, NULL, adev, 41748caab75fSGeert Uytterhoeven spi_acpi_controller_match); 41757f24467fSOctavian Purdila if (!dev) 41767f24467fSOctavian Purdila return NULL; 41777f24467fSOctavian Purdila 41788caab75fSGeert Uytterhoeven return container_of(dev, struct spi_controller, dev); 41797f24467fSOctavian Purdila } 41807f24467fSOctavian Purdila 41817f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 41827f24467fSOctavian Purdila { 41837f24467fSOctavian Purdila struct device *dev; 41847f24467fSOctavian Purdila 418500500147SSuzuki K Poulose dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); 41865b16668eSWolfram Sang return to_spi_device(dev); 41877f24467fSOctavian Purdila } 41887f24467fSOctavian Purdila 41897f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 41907f24467fSOctavian Purdila void *arg) 41917f24467fSOctavian Purdila { 41927f24467fSOctavian Purdila struct acpi_device *adev = arg; 41938caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 41947f24467fSOctavian Purdila struct spi_device *spi; 41957f24467fSOctavian Purdila 41967f24467fSOctavian Purdila switch (value) { 41977f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_ADD: 41988caab75fSGeert Uytterhoeven ctlr = acpi_spi_find_controller_by_adev(adev->parent); 41998caab75fSGeert Uytterhoeven if (!ctlr) 42007f24467fSOctavian Purdila break; 42017f24467fSOctavian Purdila 42028caab75fSGeert Uytterhoeven acpi_register_spi_device(ctlr, adev); 42038caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 42047f24467fSOctavian Purdila break; 42057f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_REMOVE: 42067f24467fSOctavian Purdila if 
(!acpi_device_enumerated(adev)) 42077f24467fSOctavian Purdila break; 42087f24467fSOctavian Purdila 42097f24467fSOctavian Purdila spi = acpi_spi_find_device_by_adev(adev); 42107f24467fSOctavian Purdila if (!spi) 42117f24467fSOctavian Purdila break; 42127f24467fSOctavian Purdila 42137f24467fSOctavian Purdila spi_unregister_device(spi); 42147f24467fSOctavian Purdila put_device(&spi->dev); 42157f24467fSOctavian Purdila break; 42167f24467fSOctavian Purdila } 42177f24467fSOctavian Purdila 42187f24467fSOctavian Purdila return NOTIFY_OK; 42197f24467fSOctavian Purdila } 42207f24467fSOctavian Purdila 42217f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = { 42227f24467fSOctavian Purdila .notifier_call = acpi_spi_notify, 42237f24467fSOctavian Purdila }; 42247f24467fSOctavian Purdila #else 42257f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier; 42267f24467fSOctavian Purdila #endif 42277f24467fSOctavian Purdila 42288ae12a0dSDavid Brownell static int __init spi_init(void) 42298ae12a0dSDavid Brownell { 4230b885244eSDavid Brownell int status; 42318ae12a0dSDavid Brownell 4232e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 4233b885244eSDavid Brownell if (!buf) { 4234b885244eSDavid Brownell status = -ENOMEM; 4235b885244eSDavid Brownell goto err0; 42368ae12a0dSDavid Brownell } 4237b885244eSDavid Brownell 4238b885244eSDavid Brownell status = bus_register(&spi_bus_type); 4239b885244eSDavid Brownell if (status < 0) 4240b885244eSDavid Brownell goto err1; 4241b885244eSDavid Brownell 4242b885244eSDavid Brownell status = class_register(&spi_master_class); 4243b885244eSDavid Brownell if (status < 0) 4244b885244eSDavid Brownell goto err2; 4245ce79d54aSPantelis Antoniou 42466c364062SGeert Uytterhoeven if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 42476c364062SGeert Uytterhoeven status = class_register(&spi_slave_class); 42486c364062SGeert Uytterhoeven if (status < 0) 42496c364062SGeert Uytterhoeven goto err3; 42506c364062SGeert Uytterhoeven } 42516c364062SGeert Uytterhoeven 42525267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 4253ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 42547f24467fSOctavian Purdila if (IS_ENABLED(CONFIG_ACPI)) 42557f24467fSOctavian Purdila WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 4256ce79d54aSPantelis Antoniou 4257b885244eSDavid Brownell return 0; 4258b885244eSDavid Brownell 42596c364062SGeert Uytterhoeven err3: 42606c364062SGeert Uytterhoeven class_unregister(&spi_master_class); 4261b885244eSDavid Brownell err2: 4262b885244eSDavid Brownell bus_unregister(&spi_bus_type); 4263b885244eSDavid Brownell err1: 4264b885244eSDavid Brownell kfree(buf); 4265b885244eSDavid Brownell buf = NULL; 4266b885244eSDavid Brownell err0: 4267b885244eSDavid Brownell return status; 4268b885244eSDavid Brownell } 4269b885244eSDavid Brownell 42708ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 42718ae12a0dSDavid Brownell * but even essential drivers wait till later 4272b885244eSDavid Brownell * 4273b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 4274b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 4275b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 42768ae12a0dSDavid Brownell */ 4277673c0c00SDavid Brownell postcore_initcall(spi_init); 4278