// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi controllers may cleanup for released devices */
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t
driver_override_store(struct device *dev,
		      struct device_attribute *a,
		      const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
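/*
 * Usage note (editorial, illustrative): writing a driver name into
 * /sys/bus/spi/devices/<dev>/driver_override (e.g. "spidev" for a
 * hypothetical device spi0.0) makes spi_match_device() accept only that
 * driver on the next (re)bind of the device; writing an empty string
 * clears the override again.
 */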
: ""); 1115039563eSTrent Piepho device_unlock(dev); 1125039563eSTrent Piepho return len; 1135039563eSTrent Piepho } 1145039563eSTrent Piepho static DEVICE_ATTR_RW(driver_override); 1155039563eSTrent Piepho 116eca2ebc7SMartin Sperl #define SPI_STATISTICS_ATTRS(field, file) \ 1178caab75fSGeert Uytterhoeven static ssize_t spi_controller_##field##_show(struct device *dev, \ 118eca2ebc7SMartin Sperl struct device_attribute *attr, \ 119eca2ebc7SMartin Sperl char *buf) \ 120eca2ebc7SMartin Sperl { \ 1218caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, \ 1228caab75fSGeert Uytterhoeven struct spi_controller, dev); \ 1238caab75fSGeert Uytterhoeven return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 124eca2ebc7SMartin Sperl } \ 1258caab75fSGeert Uytterhoeven static struct device_attribute dev_attr_spi_controller_##field = { \ 126ad25c92eSGeert Uytterhoeven .attr = { .name = file, .mode = 0444 }, \ 1278caab75fSGeert Uytterhoeven .show = spi_controller_##field##_show, \ 128eca2ebc7SMartin Sperl }; \ 129eca2ebc7SMartin Sperl static ssize_t spi_device_##field##_show(struct device *dev, \ 130eca2ebc7SMartin Sperl struct device_attribute *attr, \ 131eca2ebc7SMartin Sperl char *buf) \ 132eca2ebc7SMartin Sperl { \ 133d1eba93bSGeliang Tang struct spi_device *spi = to_spi_device(dev); \ 134eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&spi->statistics, buf); \ 135eca2ebc7SMartin Sperl } \ 136eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_device_##field = { \ 137ad25c92eSGeert Uytterhoeven .attr = { .name = file, .mode = 0444 }, \ 138eca2ebc7SMartin Sperl .show = spi_device_##field##_show, \ 139eca2ebc7SMartin Sperl } 140eca2ebc7SMartin Sperl 141eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 142eca2ebc7SMartin Sperl static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 143eca2ebc7SMartin Sperl char *buf) \ 144eca2ebc7SMartin Sperl { \ 145eca2ebc7SMartin Sperl unsigned long flags; \ 146eca2ebc7SMartin Sperl ssize_t len; \ 147eca2ebc7SMartin Sperl spin_lock_irqsave(&stat->lock, flags); \ 148eca2ebc7SMartin Sperl len = sprintf(buf, format_string, stat->field); \ 149eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stat->lock, flags); \ 150eca2ebc7SMartin Sperl return len; \ 151eca2ebc7SMartin Sperl } \ 152eca2ebc7SMartin Sperl SPI_STATISTICS_ATTRS(name, file) 153eca2ebc7SMartin Sperl 154eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW(field, format_string) \ 155eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 156eca2ebc7SMartin Sperl field, format_string) 157eca2ebc7SMartin Sperl 158eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(messages, "%lu"); 159eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(transfers, "%lu"); 160eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(errors, "%lu"); 161eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(timedout, "%lu"); 162eca2ebc7SMartin Sperl 163eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync, "%lu"); 164eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 165eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_async, "%lu"); 166eca2ebc7SMartin Sperl 167eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes, "%llu"); 168eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 169eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 170eca2ebc7SMartin Sperl 1716b7bc061SMartin Sperl #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 1726b7bc061SMartin Sperl SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
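/*
 * Worked example (editorial note): the histogram bucket above is derived
 * from fls(xfer->len).  A 200-byte transfer gives fls(200) = 8, so
 * l2len = 7 and the transfer lands in the "128-255" bucket; a zero-length
 * transfer is clamped into bucket 0 ("0-1").
 */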
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	ret = sdrv->probe(spi);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

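/*
 * Example (editorial sketch, not part of this file): a minimal protocol
 * driver registers through the spi_register_driver()/module_spi_driver()
 * wrappers around __spi_register_driver().  All "foo" names below are
 * hypothetical.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-sensor" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_0;
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver   = { .name = "foo-sensor" },
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */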
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process;
 * also used to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

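/*
 * Example (editorial sketch): an adapter driver that learns about a child
 * device out-of-band could hand-build it instead of using board info.
 * The names and values below are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */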
/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->properties) {
		status = device_add_properties(&proxy->dev, chip->properties);
		if (status) {
			dev_err(&ctlr->dev,
				"failed to add properties to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_remove_props;

	return proxy;

err_remove_props:
	if (chip->properties)
		device_remove_properties(&proxy->dev);
err_dev_put:
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 * Device properties are deep-copied though.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));
		if (info->properties) {
			bi->board_info.properties =
					property_entries_dup(info->properties);
			if (IS_ERR(bi->board_info.properties))
				return PTR_ERR(bi->board_info.properties);
		}

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
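/*
 * Example (editorial sketch): board init code typically feeds this with a
 * static table, where bus_num ties each entry to a controller.  The names
 * and values below are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 25000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */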

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		/*
		 * Honour the SPI_NO_CS flag and invert the enable line, as
		 * active low is default for SPI. Execution paths that handle
		 * polarity inversion in gpiolib (such as device tree) will
		 * enforce active high using the SPI_CS_HIGH resulting in a
		 * double inversion through the code above.
		 */
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod)
				gpiod_set_value(spi->cs_gpiod, !enable);
			else
				gpio_set_value(spi->cs_gpio, !enable);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
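/*
 * Worked example (editorial note): assuming the device's maximum DMA
 * segment size is at least one page, a page-aligned 10 KiB vmalloc'ed
 * buffer with 4 KiB pages gives desc_len = PAGE_SIZE, so spi_map_buf()
 * above allocates DIV_ROUND_UP(10240, 4096) = 3 scatterlist entries of
 * 4096, 4096 and 2048 bytes before handing them to dma_map_sg().
 */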

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	unsigned long long ms = 1;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		ms = 8LL * 1000LL * xfer->len;
		do_div(ms, xfer->speed_hz);
		ms += ms + 200; /* some tolerance */

		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
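/*
 * Worked example (editorial note): in master mode the timeout above scales
 * with the transfer.  A 4096-byte transfer at 1 MHz yields
 * 8 * 1000 * 4096 / 1000000 = 32 ms, which is then doubled and padded with
 * 200 ms of tolerance, giving a 264 ms completion timeout.
 */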
1129d57e7960SMark Brown if (ret > 0) { 1130810923f3SLubomir Rintel ret = spi_transfer_wait(ctlr, msg, xfer); 1131810923f3SLubomir Rintel if (ret < 0) 1132810923f3SLubomir Rintel msg->status = ret; 1133d57e7960SMark Brown } 113438ec10f6SMark Brown } else { 113538ec10f6SMark Brown if (xfer->len) 113638ec10f6SMark Brown dev_err(&msg->spi->dev, 113738ec10f6SMark Brown "Bufferless transfer has length %u\n", 113838ec10f6SMark Brown xfer->len); 113938ec10f6SMark Brown } 1140b158935fSMark Brown 1141b158935fSMark Brown trace_spi_transfer_stop(msg, xfer); 1142b158935fSMark Brown 1143b158935fSMark Brown if (msg->status != -EINPROGRESS) 1144b158935fSMark Brown goto out; 1145b158935fSMark Brown 11468244bd3aSDaniel Kurtz if (xfer->delay_usecs) { 11478244bd3aSDaniel Kurtz u16 us = xfer->delay_usecs; 11488244bd3aSDaniel Kurtz 11498244bd3aSDaniel Kurtz if (us <= 10) 11508244bd3aSDaniel Kurtz udelay(us); 11518244bd3aSDaniel Kurtz else 11528244bd3aSDaniel Kurtz usleep_range(us, us + DIV_ROUND_UP(us, 10)); 11538244bd3aSDaniel Kurtz } 1154b158935fSMark Brown 1155b158935fSMark Brown if (xfer->cs_change) { 1156b158935fSMark Brown if (list_is_last(&xfer->transfer_list, 1157b158935fSMark Brown &msg->transfers)) { 1158b158935fSMark Brown keep_cs = true; 1159b158935fSMark Brown } else { 11600b73aa63SMark Brown spi_set_cs(msg->spi, false); 11610b73aa63SMark Brown udelay(10); 11620b73aa63SMark Brown spi_set_cs(msg->spi, true); 1163b158935fSMark Brown } 1164b158935fSMark Brown } 1165b158935fSMark Brown 1166b158935fSMark Brown msg->actual_length += xfer->len; 1167b158935fSMark Brown } 1168b158935fSMark Brown 1169b158935fSMark Brown out: 1170b158935fSMark Brown if (ret != 0 || !keep_cs) 1171b158935fSMark Brown spi_set_cs(msg->spi, false); 1172b158935fSMark Brown 1173b158935fSMark Brown if (msg->status == -EINPROGRESS) 1174b158935fSMark Brown msg->status = ret; 1175b158935fSMark Brown 11768caab75fSGeert Uytterhoeven if (msg->status && ctlr->handle_err) 11778caab75fSGeert Uytterhoeven ctlr->handle_err(ctlr, msg); 1178b716c4ffSAndy Shevchenko 11798caab75fSGeert Uytterhoeven spi_res_release(ctlr, msg); 1180d780c371SMartin Sperl 11818caab75fSGeert Uytterhoeven spi_finalize_current_message(ctlr); 1182b158935fSMark Brown 1183b158935fSMark Brown return ret; 1184b158935fSMark Brown } 1185b158935fSMark Brown 1186b158935fSMark Brown /** 1187b158935fSMark Brown * spi_finalize_current_transfer - report completion of a transfer 11888caab75fSGeert Uytterhoeven * @ctlr: the controller reporting completion 1189b158935fSMark Brown * 1190b158935fSMark Brown * Called by SPI drivers using the core transfer_one_message() 1191b158935fSMark Brown * implementation to notify it that the current interrupt driven 11929e8f4882SGeert Uytterhoeven * transfer has finished and the next one may be scheduled. 
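 *
 * A minimal usage sketch for an interrupt driven driver that registered
 * its struct spi_controller as the dev_id of its interrupt handler; the
 * foo_* names are illustrative only, not a real driver:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		if (!foo_transfer_done(ctlr))
 *			return IRQ_NONE;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}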
1193b158935fSMark Brown */ 11948caab75fSGeert Uytterhoeven void spi_finalize_current_transfer(struct spi_controller *ctlr) 1195b158935fSMark Brown { 11968caab75fSGeert Uytterhoeven complete(&ctlr->xfer_completion); 1197b158935fSMark Brown } 1198b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1199b158935fSMark Brown 1200ffbbdd21SLinus Walleij /** 1201fc9e0f71SMark Brown * __spi_pump_messages - function which processes spi message queue 12028caab75fSGeert Uytterhoeven * @ctlr: controller to process queue for 1203fc9e0f71SMark Brown * @in_kthread: true if we are in the context of the message pump thread 1204ffbbdd21SLinus Walleij * 1205ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that 1206ffbbdd21SLinus Walleij * needs processing and if so call out to the driver to initialize hardware 1207ffbbdd21SLinus Walleij * and transfer each message. 1208ffbbdd21SLinus Walleij * 12090461a414SMark Brown * Note that it is called both from the kthread itself and also from 12100461a414SMark Brown * inside spi_sync(); the queue extraction handling at the top of the 12110461a414SMark Brown * function should deal with this safely. 1212ffbbdd21SLinus Walleij */ 12138caab75fSGeert Uytterhoeven static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1214ffbbdd21SLinus Walleij { 1215ffbbdd21SLinus Walleij unsigned long flags; 1216ffbbdd21SLinus Walleij bool was_busy = false; 1217ffbbdd21SLinus Walleij int ret; 1218ffbbdd21SLinus Walleij 1219983aee5dSMark Brown /* Lock queue */ 12208caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1221983aee5dSMark Brown 1222983aee5dSMark Brown /* Make sure we are not already running a message */ 12238caab75fSGeert Uytterhoeven if (ctlr->cur_msg) { 12248caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1225983aee5dSMark Brown return; 1226983aee5dSMark Brown } 1227983aee5dSMark Brown 1228*f0125f1aSMark Brown /* If another context is idling the device then defer */ 12298caab75fSGeert Uytterhoeven if (ctlr->idling) { 12308caab75fSGeert Uytterhoeven kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 12318caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 12320461a414SMark Brown return; 12330461a414SMark Brown } 12340461a414SMark Brown 1235983aee5dSMark Brown /* Check if the queue is idle */ 12368caab75fSGeert Uytterhoeven if (list_empty(&ctlr->queue) || !ctlr->running) { 12378caab75fSGeert Uytterhoeven if (!ctlr->busy) { 12388caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1239ffbbdd21SLinus Walleij return; 1240ffbbdd21SLinus Walleij } 1241fc9e0f71SMark Brown 1242*f0125f1aSMark Brown /* Only do teardown in the thread */ 1243*f0125f1aSMark Brown if (!in_kthread) { 1244*f0125f1aSMark Brown kthread_queue_work(&ctlr->kworker, 1245*f0125f1aSMark Brown &ctlr->pump_messages); 1246*f0125f1aSMark Brown spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1247*f0125f1aSMark Brown return; 1248*f0125f1aSMark Brown } 1249*f0125f1aSMark Brown 1250*f0125f1aSMark Brown ctlr->busy = false; 1251*f0125f1aSMark Brown ctlr->idling = true; 1252*f0125f1aSMark Brown spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1253*f0125f1aSMark Brown 1254*f0125f1aSMark Brown kfree(ctlr->dummy_rx); 1255*f0125f1aSMark Brown ctlr->dummy_rx = NULL; 1256*f0125f1aSMark Brown kfree(ctlr->dummy_tx); 1257*f0125f1aSMark Brown ctlr->dummy_tx = NULL; 1258*f0125f1aSMark Brown if (ctlr->unprepare_transfer_hardware && 1259*f0125f1aSMark Brown 
ctlr->unprepare_transfer_hardware(ctlr)) 1260*f0125f1aSMark Brown dev_err(&ctlr->dev, 1261*f0125f1aSMark Brown "failed to unprepare transfer hardware\n"); 1262*f0125f1aSMark Brown if (ctlr->auto_runtime_pm) { 1263*f0125f1aSMark Brown pm_runtime_mark_last_busy(ctlr->dev.parent); 1264*f0125f1aSMark Brown pm_runtime_put_autosuspend(ctlr->dev.parent); 1265*f0125f1aSMark Brown } 1266*f0125f1aSMark Brown trace_spi_controller_idle(ctlr); 1267*f0125f1aSMark Brown 1268*f0125f1aSMark Brown spin_lock_irqsave(&ctlr->queue_lock, flags); 1269*f0125f1aSMark Brown ctlr->idling = false; 12708caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1271ffbbdd21SLinus Walleij return; 1272ffbbdd21SLinus Walleij } 1273ffbbdd21SLinus Walleij 1274ffbbdd21SLinus Walleij /* Extract head of queue */ 12758caab75fSGeert Uytterhoeven ctlr->cur_msg = 12768caab75fSGeert Uytterhoeven list_first_entry(&ctlr->queue, struct spi_message, queue); 1277ffbbdd21SLinus Walleij 12788caab75fSGeert Uytterhoeven list_del_init(&ctlr->cur_msg->queue); 12798caab75fSGeert Uytterhoeven if (ctlr->busy) 1280ffbbdd21SLinus Walleij was_busy = true; 1281ffbbdd21SLinus Walleij else 12828caab75fSGeert Uytterhoeven ctlr->busy = true; 12838caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1284ffbbdd21SLinus Walleij 12858caab75fSGeert Uytterhoeven mutex_lock(&ctlr->io_mutex); 1286ef4d96ecSMark Brown 12878caab75fSGeert Uytterhoeven if (!was_busy && ctlr->auto_runtime_pm) { 12888caab75fSGeert Uytterhoeven ret = pm_runtime_get_sync(ctlr->dev.parent); 128949834de2SMark Brown if (ret < 0) { 12907e48e23aSTony Lindgren pm_runtime_put_noidle(ctlr->dev.parent); 12918caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "Failed to power device: %d\n", 129249834de2SMark Brown ret); 12938caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 129449834de2SMark Brown return; 129549834de2SMark Brown } 129649834de2SMark Brown } 129749834de2SMark Brown 129856ec1978SMark Brown if (!was_busy) 12998caab75fSGeert Uytterhoeven trace_spi_controller_busy(ctlr); 130056ec1978SMark Brown 13018caab75fSGeert Uytterhoeven if (!was_busy && ctlr->prepare_transfer_hardware) { 13028caab75fSGeert Uytterhoeven ret = ctlr->prepare_transfer_hardware(ctlr); 1303ffbbdd21SLinus Walleij if (ret) { 13048caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 1305ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 130649834de2SMark Brown 13078caab75fSGeert Uytterhoeven if (ctlr->auto_runtime_pm) 13088caab75fSGeert Uytterhoeven pm_runtime_put(ctlr->dev.parent); 13098caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 1310ffbbdd21SLinus Walleij return; 1311ffbbdd21SLinus Walleij } 1312ffbbdd21SLinus Walleij } 1313ffbbdd21SLinus Walleij 13148caab75fSGeert Uytterhoeven trace_spi_message_start(ctlr->cur_msg); 131556ec1978SMark Brown 13168caab75fSGeert Uytterhoeven if (ctlr->prepare_message) { 13178caab75fSGeert Uytterhoeven ret = ctlr->prepare_message(ctlr, ctlr->cur_msg); 13182841a5fcSMark Brown if (ret) { 13198caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to prepare message: %d\n", 13208caab75fSGeert Uytterhoeven ret); 13218caab75fSGeert Uytterhoeven ctlr->cur_msg->status = ret; 13228caab75fSGeert Uytterhoeven spi_finalize_current_message(ctlr); 132349023d2eSJon Hunter goto out; 13242841a5fcSMark Brown } 13258caab75fSGeert Uytterhoeven ctlr->cur_msg_prepared = true; 13262841a5fcSMark Brown } 13272841a5fcSMark Brown 13288caab75fSGeert Uytterhoeven ret = spi_map_msg(ctlr, ctlr->cur_msg); 132999adef31SMark Brown if (ret) { 
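		/* Mapping failed: complete the message with the error status */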
13308caab75fSGeert Uytterhoeven ctlr->cur_msg->status = ret; 13318caab75fSGeert Uytterhoeven spi_finalize_current_message(ctlr); 133249023d2eSJon Hunter goto out; 133399adef31SMark Brown } 133499adef31SMark Brown 13358caab75fSGeert Uytterhoeven ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg); 1336ffbbdd21SLinus Walleij if (ret) { 13378caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 13381f802f82SGeert Uytterhoeven "failed to transfer one message from queue\n"); 133949023d2eSJon Hunter goto out; 1340ffbbdd21SLinus Walleij } 134149023d2eSJon Hunter 134249023d2eSJon Hunter out: 13438caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->io_mutex); 134462826970SMark Brown 134562826970SMark Brown /* Prod the scheduler in case transfer_one() was busy waiting */ 134649023d2eSJon Hunter if (!ret) 134762826970SMark Brown cond_resched(); 1348ffbbdd21SLinus Walleij } 1349ffbbdd21SLinus Walleij 1350fc9e0f71SMark Brown /** 1351fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 13528caab75fSGeert Uytterhoeven * @work: pointer to kthread work struct contained in the controller struct 1353fc9e0f71SMark Brown */ 1354fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1355fc9e0f71SMark Brown { 13568caab75fSGeert Uytterhoeven struct spi_controller *ctlr = 13578caab75fSGeert Uytterhoeven container_of(work, struct spi_controller, pump_messages); 1358fc9e0f71SMark Brown 13598caab75fSGeert Uytterhoeven __spi_pump_messages(ctlr, true); 1360fc9e0f71SMark Brown } 1361fc9e0f71SMark Brown 13628caab75fSGeert Uytterhoeven static int spi_init_queue(struct spi_controller *ctlr) 1363ffbbdd21SLinus Walleij { 1364ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1365ffbbdd21SLinus Walleij 13668caab75fSGeert Uytterhoeven ctlr->running = false; 13678caab75fSGeert Uytterhoeven ctlr->busy = false; 1368ffbbdd21SLinus Walleij 13698caab75fSGeert Uytterhoeven kthread_init_worker(&ctlr->kworker); 13708caab75fSGeert Uytterhoeven ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, 13718caab75fSGeert Uytterhoeven "%s", dev_name(&ctlr->dev)); 13728caab75fSGeert Uytterhoeven if (IS_ERR(ctlr->kworker_task)) { 13738caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to create message pump task\n"); 13748caab75fSGeert Uytterhoeven return PTR_ERR(ctlr->kworker_task); 1375ffbbdd21SLinus Walleij } 13768caab75fSGeert Uytterhoeven kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 1377*f0125f1aSMark Brown 1378ffbbdd21SLinus Walleij /* 13798caab75fSGeert Uytterhoeven * Controller config will indicate if this controller should run the 1380ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1381ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1382ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 1383ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 
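 *
 * As an illustrative sketch (struct foo_priv is hypothetical, error
 * handling omitted), a controller driver opts in before registration:
 *
 *	ctlr = spi_alloc_master(dev, sizeof(struct foo_priv));
 *	ctlr->rt = true;
 *	ret = devm_spi_register_controller(dev, ctlr);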
1384ffbbdd21SLinus Walleij */ 13858caab75fSGeert Uytterhoeven if (ctlr->rt) { 13868caab75fSGeert Uytterhoeven dev_info(&ctlr->dev, 1387ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 13888caab75fSGeert Uytterhoeven sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param); 1389ffbbdd21SLinus Walleij } 1390ffbbdd21SLinus Walleij 1391ffbbdd21SLinus Walleij return 0; 1392ffbbdd21SLinus Walleij } 1393ffbbdd21SLinus Walleij 1394ffbbdd21SLinus Walleij /** 1395ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1396ffbbdd21SLinus Walleij * messages 13978caab75fSGeert Uytterhoeven * @ctlr: the controller to check for queued messages 1398ffbbdd21SLinus Walleij * 1399ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1400ffbbdd21SLinus Walleij * this call. 140197d56dc6SJavier Martinez Canillas * 140297d56dc6SJavier Martinez Canillas * Return: the next message in the queue, else NULL if the queue is empty. 1403ffbbdd21SLinus Walleij */ 14048caab75fSGeert Uytterhoeven struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) 1405ffbbdd21SLinus Walleij { 1406ffbbdd21SLinus Walleij struct spi_message *next; 1407ffbbdd21SLinus Walleij unsigned long flags; 1408ffbbdd21SLinus Walleij 1409ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 14108caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 14118caab75fSGeert Uytterhoeven next = list_first_entry_or_null(&ctlr->queue, struct spi_message, 14121cfd97f9SAxel Lin queue); 14138caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1414ffbbdd21SLinus Walleij 1415ffbbdd21SLinus Walleij return next; 1416ffbbdd21SLinus Walleij } 1417ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1418ffbbdd21SLinus Walleij 1419ffbbdd21SLinus Walleij /** 1420ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 14218caab75fSGeert Uytterhoeven * @ctlr: the controller to return the message to 1422ffbbdd21SLinus Walleij * 1423ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 1424ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue.
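 *
 * Only drivers that provide their own transfer_one_message() need to call
 * this directly; the default spi_transfer_one_message() above already does
 * so. An illustrative sketch (the foo_* names are hypothetical):
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		msg->status = foo_run_transfers(ctlr, msg);
 *		spi_finalize_current_message(ctlr);
 *		return 0;
 *	}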
1425ffbbdd21SLinus Walleij */ 14268caab75fSGeert Uytterhoeven void spi_finalize_current_message(struct spi_controller *ctlr) 1427ffbbdd21SLinus Walleij { 1428ffbbdd21SLinus Walleij struct spi_message *mesg; 1429ffbbdd21SLinus Walleij unsigned long flags; 14302841a5fcSMark Brown int ret; 1431ffbbdd21SLinus Walleij 14328caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 14338caab75fSGeert Uytterhoeven mesg = ctlr->cur_msg; 14348caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1435ffbbdd21SLinus Walleij 14368caab75fSGeert Uytterhoeven spi_unmap_msg(ctlr, mesg); 143799adef31SMark Brown 14388caab75fSGeert Uytterhoeven if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 14398caab75fSGeert Uytterhoeven ret = ctlr->unprepare_message(ctlr, mesg); 14402841a5fcSMark Brown if (ret) { 14418caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 14428caab75fSGeert Uytterhoeven ret); 14432841a5fcSMark Brown } 14442841a5fcSMark Brown } 1445391949b6SUwe Kleine-König 14468caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 14478caab75fSGeert Uytterhoeven ctlr->cur_msg = NULL; 14488caab75fSGeert Uytterhoeven ctlr->cur_msg_prepared = false; 14498caab75fSGeert Uytterhoeven kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 14508caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 14518e76ef88SMartin Sperl 14528e76ef88SMartin Sperl trace_spi_message_done(mesg); 14532841a5fcSMark Brown 1454ffbbdd21SLinus Walleij mesg->state = NULL; 1455ffbbdd21SLinus Walleij if (mesg->complete) 1456ffbbdd21SLinus Walleij mesg->complete(mesg->context); 1457ffbbdd21SLinus Walleij } 1458ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1459ffbbdd21SLinus Walleij 14608caab75fSGeert Uytterhoeven static int spi_start_queue(struct spi_controller *ctlr) 1461ffbbdd21SLinus Walleij { 1462ffbbdd21SLinus Walleij unsigned long flags; 1463ffbbdd21SLinus Walleij 14648caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1465ffbbdd21SLinus Walleij 14668caab75fSGeert Uytterhoeven if (ctlr->running || ctlr->busy) { 14678caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1468ffbbdd21SLinus Walleij return -EBUSY; 1469ffbbdd21SLinus Walleij } 1470ffbbdd21SLinus Walleij 14718caab75fSGeert Uytterhoeven ctlr->running = true; 14728caab75fSGeert Uytterhoeven ctlr->cur_msg = NULL; 14738caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1474ffbbdd21SLinus Walleij 14758caab75fSGeert Uytterhoeven kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1476ffbbdd21SLinus Walleij 1477ffbbdd21SLinus Walleij return 0; 1478ffbbdd21SLinus Walleij } 1479ffbbdd21SLinus Walleij 14808caab75fSGeert Uytterhoeven static int spi_stop_queue(struct spi_controller *ctlr) 1481ffbbdd21SLinus Walleij { 1482ffbbdd21SLinus Walleij unsigned long flags; 1483ffbbdd21SLinus Walleij unsigned limit = 500; 1484ffbbdd21SLinus Walleij int ret = 0; 1485ffbbdd21SLinus Walleij 14868caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1487ffbbdd21SLinus Walleij 1488ffbbdd21SLinus Walleij /* 1489ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 14908caab75fSGeert Uytterhoeven * A wait_queue on the ctlr->busy could be used, but then the common 1491ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1492ffbbdd21SLinus Walleij * friends on every SPI message. 
Do this instead. 1493ffbbdd21SLinus Walleij */ 14948caab75fSGeert Uytterhoeven while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 14958caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1496f97b26b0SAxel Lin usleep_range(10000, 11000); 14978caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1498ffbbdd21SLinus Walleij } 1499ffbbdd21SLinus Walleij 15008caab75fSGeert Uytterhoeven if (!list_empty(&ctlr->queue) || ctlr->busy) 1501ffbbdd21SLinus Walleij ret = -EBUSY; 1502ffbbdd21SLinus Walleij else 15038caab75fSGeert Uytterhoeven ctlr->running = false; 1504ffbbdd21SLinus Walleij 15058caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1506ffbbdd21SLinus Walleij 1507ffbbdd21SLinus Walleij if (ret) { 15088caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, "could not stop message queue\n"); 1509ffbbdd21SLinus Walleij return ret; 1510ffbbdd21SLinus Walleij } 1511ffbbdd21SLinus Walleij return ret; 1512ffbbdd21SLinus Walleij } 1513ffbbdd21SLinus Walleij 15148caab75fSGeert Uytterhoeven static int spi_destroy_queue(struct spi_controller *ctlr) 1515ffbbdd21SLinus Walleij { 1516ffbbdd21SLinus Walleij int ret; 1517ffbbdd21SLinus Walleij 15188caab75fSGeert Uytterhoeven ret = spi_stop_queue(ctlr); 1519ffbbdd21SLinus Walleij 1520ffbbdd21SLinus Walleij /* 15213989144fSPetr Mladek * kthread_flush_worker will block until all work is done. 1522ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 1523ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 1524ffbbdd21SLinus Walleij * return anyway. 1525ffbbdd21SLinus Walleij */ 1526ffbbdd21SLinus Walleij if (ret) { 15278caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem destroying queue\n"); 1528ffbbdd21SLinus Walleij return ret; 1529ffbbdd21SLinus Walleij } 1530ffbbdd21SLinus Walleij 15318caab75fSGeert Uytterhoeven kthread_flush_worker(&ctlr->kworker); 15328caab75fSGeert Uytterhoeven kthread_stop(ctlr->kworker_task); 1533ffbbdd21SLinus Walleij 1534ffbbdd21SLinus Walleij return 0; 1535ffbbdd21SLinus Walleij } 1536ffbbdd21SLinus Walleij 15370461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi, 15380461a414SMark Brown struct spi_message *msg, 15390461a414SMark Brown bool need_pump) 1540ffbbdd21SLinus Walleij { 15418caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 1542ffbbdd21SLinus Walleij unsigned long flags; 1543ffbbdd21SLinus Walleij 15448caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->queue_lock, flags); 1545ffbbdd21SLinus Walleij 15468caab75fSGeert Uytterhoeven if (!ctlr->running) { 15478caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1548ffbbdd21SLinus Walleij return -ESHUTDOWN; 1549ffbbdd21SLinus Walleij } 1550ffbbdd21SLinus Walleij msg->actual_length = 0; 1551ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 1552ffbbdd21SLinus Walleij 15538caab75fSGeert Uytterhoeven list_add_tail(&msg->queue, &ctlr->queue); 1554*f0125f1aSMark Brown if (!ctlr->busy && need_pump) 15558caab75fSGeert Uytterhoeven kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1556ffbbdd21SLinus Walleij 15578caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1558ffbbdd21SLinus Walleij return 0; 1559ffbbdd21SLinus Walleij } 1560ffbbdd21SLinus Walleij 15610461a414SMark Brown /** 15620461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers 15630461a414SMark Brown * @spi: spi device which is requesting 
transfer 15640461a414SMark Brown * @msg: spi message which is to be handled and queued to the driver queue 156597d56dc6SJavier Martinez Canillas * 156697d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 15670461a414SMark Brown */ 15680461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 15690461a414SMark Brown { 15700461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 15710461a414SMark Brown } 15720461a414SMark Brown 15738caab75fSGeert Uytterhoeven static int spi_controller_initialize_queue(struct spi_controller *ctlr) 1574ffbbdd21SLinus Walleij { 1575ffbbdd21SLinus Walleij int ret; 1576ffbbdd21SLinus Walleij 15778caab75fSGeert Uytterhoeven ctlr->transfer = spi_queued_transfer; 15788caab75fSGeert Uytterhoeven if (!ctlr->transfer_one_message) 15798caab75fSGeert Uytterhoeven ctlr->transfer_one_message = spi_transfer_one_message; 1580ffbbdd21SLinus Walleij 1581ffbbdd21SLinus Walleij /* Initialize and start queue */ 15828caab75fSGeert Uytterhoeven ret = spi_init_queue(ctlr); 1583ffbbdd21SLinus Walleij if (ret) { 15848caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem initializing queue\n"); 1585ffbbdd21SLinus Walleij goto err_init_queue; 1586ffbbdd21SLinus Walleij } 15878caab75fSGeert Uytterhoeven ctlr->queued = true; 15888caab75fSGeert Uytterhoeven ret = spi_start_queue(ctlr); 1589ffbbdd21SLinus Walleij if (ret) { 15908caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "problem starting queue\n"); 1591ffbbdd21SLinus Walleij goto err_start_queue; 1592ffbbdd21SLinus Walleij } 1593ffbbdd21SLinus Walleij 1594ffbbdd21SLinus Walleij return 0; 1595ffbbdd21SLinus Walleij 1596ffbbdd21SLinus Walleij err_start_queue: 15978caab75fSGeert Uytterhoeven spi_destroy_queue(ctlr); 1598c3676d5cSMark Brown err_init_queue: 1599ffbbdd21SLinus Walleij return ret; 1600ffbbdd21SLinus Walleij } 1601ffbbdd21SLinus Walleij 1602988f259bSBoris Brezillon /** 1603988f259bSBoris Brezillon * spi_flush_queue - Send all pending messages in the queue from the caller's 1604988f259bSBoris Brezillon * context 1605988f259bSBoris Brezillon * @ctlr: controller to process queue for 1606988f259bSBoris Brezillon * 1607988f259bSBoris Brezillon * This should be used when one wants to ensure all pending messages have been 1608988f259bSBoris Brezillon * sent before doing something. It is used by the spi-mem code to make sure SPI 1609988f259bSBoris Brezillon * memory operations do not preempt regular SPI transfers that have been queued 1610988f259bSBoris Brezillon * before the spi-mem operation. 1611988f259bSBoris Brezillon */ 1612988f259bSBoris Brezillon void spi_flush_queue(struct spi_controller *ctlr) 1613988f259bSBoris Brezillon { 1614988f259bSBoris Brezillon if (ctlr->transfer == spi_queued_transfer) 1615988f259bSBoris Brezillon __spi_pump_messages(ctlr, false); 1616988f259bSBoris Brezillon } 1617988f259bSBoris Brezillon 1618ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1619ffbbdd21SLinus Walleij 16207cb94361SAndreas Larsson #if defined(CONFIG_OF) 16218caab75fSGeert Uytterhoeven static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 1622c2e51ac3SGeert Uytterhoeven struct device_node *nc) 1623d57a4282SGrant Likely { 162489da4293STrent Piepho u32 value; 1625c2e51ac3SGeert Uytterhoeven int rc; 1626d57a4282SGrant Likely 1627d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.
*/ 1628e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-cpha")) 1629d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1630e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-cpol")) 1631d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1632e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-3wire")) 1633c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1634e0bcb680SSergei Shtylyov if (of_property_read_bool(nc, "spi-lsb-first")) 1635cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 1636d57a4282SGrant Likely 1637f3186dd8SLinus Walleij /* 1638f3186dd8SLinus Walleij * For descriptors associated with the device, polarity inversion is 1639f3186dd8SLinus Walleij * handled in the gpiolib, so all chip selects are "active high" in 1640f3186dd8SLinus Walleij * the logical sense, the gpiolib will invert the line if need be. 1641f3186dd8SLinus Walleij */ 1642f3186dd8SLinus Walleij if (ctlr->use_gpio_descriptors) 1643f3186dd8SLinus Walleij spi->mode |= SPI_CS_HIGH; 1644f3186dd8SLinus Walleij else if (of_property_read_bool(nc, "spi-cs-high")) 1645f3186dd8SLinus Walleij spi->mode |= SPI_CS_HIGH; 1646f3186dd8SLinus Walleij 1647f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 164889da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 164989da4293STrent Piepho switch (value) { 165089da4293STrent Piepho case 1: 1651f477b7fbSwangyuhang break; 165289da4293STrent Piepho case 2: 1653f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1654f477b7fbSwangyuhang break; 165589da4293STrent Piepho case 4: 1656f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1657f477b7fbSwangyuhang break; 16586b03061fSYogesh Narayan Gaur case 8: 16596b03061fSYogesh Narayan Gaur spi->mode |= SPI_TX_OCTAL; 16606b03061fSYogesh Narayan Gaur break; 1661f477b7fbSwangyuhang default: 16628caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 1663a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 166489da4293STrent Piepho value); 166580874d8cSGeert Uytterhoeven break; 1666f477b7fbSwangyuhang } 1667a822e99cSMark Brown } 1668f477b7fbSwangyuhang 166989da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 167089da4293STrent Piepho switch (value) { 167189da4293STrent Piepho case 1: 1672f477b7fbSwangyuhang break; 167389da4293STrent Piepho case 2: 1674f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1675f477b7fbSwangyuhang break; 167689da4293STrent Piepho case 4: 1677f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1678f477b7fbSwangyuhang break; 16796b03061fSYogesh Narayan Gaur case 8: 16806b03061fSYogesh Narayan Gaur spi->mode |= SPI_RX_OCTAL; 16816b03061fSYogesh Narayan Gaur break; 1682f477b7fbSwangyuhang default: 16838caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 1684a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 168589da4293STrent Piepho value); 168680874d8cSGeert Uytterhoeven break; 1687f477b7fbSwangyuhang } 1688a822e99cSMark Brown } 1689f477b7fbSwangyuhang 16908caab75fSGeert Uytterhoeven if (spi_controller_is_slave(ctlr)) { 1691194276b0SRob Herring if (!of_node_name_eq(nc, "slave")) { 169225c56c88SRob Herring dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 169325c56c88SRob Herring nc); 16946c364062SGeert Uytterhoeven return -EINVAL; 16956c364062SGeert Uytterhoeven } 16966c364062SGeert Uytterhoeven return 0; 16976c364062SGeert Uytterhoeven } 16986c364062SGeert Uytterhoeven 16996c364062SGeert Uytterhoeven /* Device address */ 17006c364062SGeert Uytterhoeven rc = of_property_read_u32(nc, "reg", &value); 17016c364062SGeert Uytterhoeven if (rc) { 170225c56c88SRob Herring 
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 170325c56c88SRob Herring nc, rc); 17046c364062SGeert Uytterhoeven return rc; 17056c364062SGeert Uytterhoeven } 17066c364062SGeert Uytterhoeven spi->chip_select = value; 17076c364062SGeert Uytterhoeven 1708d57a4282SGrant Likely /* Device speed */ 170989da4293STrent Piepho rc = of_property_read_u32(nc, "spi-max-frequency", &value); 171089da4293STrent Piepho if (rc) { 17118caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, 171225c56c88SRob Herring "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); 1713c2e51ac3SGeert Uytterhoeven return rc; 1714d57a4282SGrant Likely } 171589da4293STrent Piepho spi->max_speed_hz = value; 1716d57a4282SGrant Likely 1717c2e51ac3SGeert Uytterhoeven return 0; 1718c2e51ac3SGeert Uytterhoeven } 1719c2e51ac3SGeert Uytterhoeven 1720c2e51ac3SGeert Uytterhoeven static struct spi_device * 17218caab75fSGeert Uytterhoeven of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 1722c2e51ac3SGeert Uytterhoeven { 1723c2e51ac3SGeert Uytterhoeven struct spi_device *spi; 1724c2e51ac3SGeert Uytterhoeven int rc; 1725c2e51ac3SGeert Uytterhoeven 1726c2e51ac3SGeert Uytterhoeven /* Alloc an spi_device */ 17278caab75fSGeert Uytterhoeven spi = spi_alloc_device(ctlr); 1728c2e51ac3SGeert Uytterhoeven if (!spi) { 172925c56c88SRob Herring dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 1730c2e51ac3SGeert Uytterhoeven rc = -ENOMEM; 1731c2e51ac3SGeert Uytterhoeven goto err_out; 1732c2e51ac3SGeert Uytterhoeven } 1733c2e51ac3SGeert Uytterhoeven 1734c2e51ac3SGeert Uytterhoeven /* Select device driver */ 1735c2e51ac3SGeert Uytterhoeven rc = of_modalias_node(nc, spi->modalias, 1736c2e51ac3SGeert Uytterhoeven sizeof(spi->modalias)); 1737c2e51ac3SGeert Uytterhoeven if (rc < 0) { 173825c56c88SRob Herring dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 1739c2e51ac3SGeert Uytterhoeven goto err_out; 1740c2e51ac3SGeert Uytterhoeven } 1741c2e51ac3SGeert Uytterhoeven 17428caab75fSGeert Uytterhoeven rc = of_spi_parse_dt(ctlr, spi, nc); 1743c2e51ac3SGeert Uytterhoeven if (rc) 1744c2e51ac3SGeert Uytterhoeven goto err_out; 1745c2e51ac3SGeert Uytterhoeven 1746d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 1747d57a4282SGrant Likely of_node_get(nc); 1748d57a4282SGrant Likely spi->dev.of_node = nc; 1749d57a4282SGrant Likely 1750d57a4282SGrant Likely /* Register the new device */ 1751d57a4282SGrant Likely rc = spi_add_device(spi); 1752d57a4282SGrant Likely if (rc) { 175325c56c88SRob Herring dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 17548324147fSJohan Hovold goto err_of_node_put; 1755d57a4282SGrant Likely } 1756d57a4282SGrant Likely 1757aff5e3f8SPantelis Antoniou return spi; 1758aff5e3f8SPantelis Antoniou 17598324147fSJohan Hovold err_of_node_put: 17608324147fSJohan Hovold of_node_put(nc); 1761aff5e3f8SPantelis Antoniou err_out: 1762aff5e3f8SPantelis Antoniou spi_dev_put(spi); 1763aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 1764aff5e3f8SPantelis Antoniou } 1765aff5e3f8SPantelis Antoniou 1766aff5e3f8SPantelis Antoniou /** 1767aff5e3f8SPantelis Antoniou * of_register_spi_devices() - Register child devices onto the SPI bus 17688caab75fSGeert Uytterhoeven * @ctlr: Pointer to spi_controller device 1769aff5e3f8SPantelis Antoniou * 17706c364062SGeert Uytterhoeven * Registers an spi_device for each child node of controller node which 17716c364062SGeert Uytterhoeven * represents a valid SPI slave. 
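 * Child nodes that are already flagged OF_POPULATED are skipped, so a
 * repeated call does not create duplicate devices.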
1772aff5e3f8SPantelis Antoniou */ 17738caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr) 1774aff5e3f8SPantelis Antoniou { 1775aff5e3f8SPantelis Antoniou struct spi_device *spi; 1776aff5e3f8SPantelis Antoniou struct device_node *nc; 1777aff5e3f8SPantelis Antoniou 17788caab75fSGeert Uytterhoeven if (!ctlr->dev.of_node) 1779aff5e3f8SPantelis Antoniou return; 1780aff5e3f8SPantelis Antoniou 17818caab75fSGeert Uytterhoeven for_each_available_child_of_node(ctlr->dev.of_node, nc) { 1782bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1783bd6c1644SGeert Uytterhoeven continue; 17848caab75fSGeert Uytterhoeven spi = of_register_spi_device(ctlr, nc); 1785e0af98a7SRalf Ramsauer if (IS_ERR(spi)) { 17868caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, 178725c56c88SRob Herring "Failed to create SPI device for %pOF\n", nc); 1788e0af98a7SRalf Ramsauer of_node_clear_flag(nc, OF_POPULATED); 1789e0af98a7SRalf Ramsauer } 1790d57a4282SGrant Likely } 1791d57a4282SGrant Likely } 1792d57a4282SGrant Likely #else 17938caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr) { } 1794d57a4282SGrant Likely #endif 1795d57a4282SGrant Likely 179664bee4d2SMika Westerberg #ifdef CONFIG_ACPI 17978a2e487eSLukas Wunner static void acpi_spi_parse_apple_properties(struct spi_device *spi) 17988a2e487eSLukas Wunner { 17998a2e487eSLukas Wunner struct acpi_device *dev = ACPI_COMPANION(&spi->dev); 18008a2e487eSLukas Wunner const union acpi_object *obj; 18018a2e487eSLukas Wunner 18028a2e487eSLukas Wunner if (!x86_apple_machine) 18038a2e487eSLukas Wunner return; 18048a2e487eSLukas Wunner 18058a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 18068a2e487eSLukas Wunner && obj->buffer.length >= 4) 18078a2e487eSLukas Wunner spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 18088a2e487eSLukas Wunner 18098a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 18108a2e487eSLukas Wunner && obj->buffer.length == 8) 18118a2e487eSLukas Wunner spi->bits_per_word = *(u64 *)obj->buffer.pointer; 18128a2e487eSLukas Wunner 18138a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 18148a2e487eSLukas Wunner && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 18158a2e487eSLukas Wunner spi->mode |= SPI_LSB_FIRST; 18168a2e487eSLukas Wunner 18178a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 18188a2e487eSLukas Wunner && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 18198a2e487eSLukas Wunner spi->mode |= SPI_CPOL; 18208a2e487eSLukas Wunner 18218a2e487eSLukas Wunner if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 18228a2e487eSLukas Wunner && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 18238a2e487eSLukas Wunner spi->mode |= SPI_CPHA; 18248a2e487eSLukas Wunner } 18258a2e487eSLukas Wunner 182664bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 182764bee4d2SMika Westerberg { 182864bee4d2SMika Westerberg struct spi_device *spi = data; 18298caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 183064bee4d2SMika Westerberg 183164bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 183264bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 183364bee4d2SMika Westerberg 183464bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 183564bee4d2SMika 
Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1836a0a90718SMika Westerberg /* 1837a0a90718SMika Westerberg * ACPI DeviceSelection numbering is handled by the 1838a0a90718SMika Westerberg * host controller driver in Windows and can vary 1839a0a90718SMika Westerberg * from driver to driver. In Linux we always expect 1840a0a90718SMika Westerberg * 0 .. max - 1 so we need to ask the driver to 1841a0a90718SMika Westerberg * translate between the two schemes. 1842a0a90718SMika Westerberg */ 18438caab75fSGeert Uytterhoeven if (ctlr->fw_translate_cs) { 18448caab75fSGeert Uytterhoeven int cs = ctlr->fw_translate_cs(ctlr, 1845a0a90718SMika Westerberg sb->device_selection); 1846a0a90718SMika Westerberg if (cs < 0) 1847a0a90718SMika Westerberg return cs; 1848a0a90718SMika Westerberg spi->chip_select = cs; 1849a0a90718SMika Westerberg } else { 185064bee4d2SMika Westerberg spi->chip_select = sb->device_selection; 1851a0a90718SMika Westerberg } 1852a0a90718SMika Westerberg 185364bee4d2SMika Westerberg spi->max_speed_hz = sb->connection_speed; 185464bee4d2SMika Westerberg 185564bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 185664bee4d2SMika Westerberg spi->mode |= SPI_CPHA; 185764bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 185864bee4d2SMika Westerberg spi->mode |= SPI_CPOL; 185964bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 186064bee4d2SMika Westerberg spi->mode |= SPI_CS_HIGH; 186164bee4d2SMika Westerberg } 186264bee4d2SMika Westerberg } else if (spi->irq < 0) { 186364bee4d2SMika Westerberg struct resource r; 186464bee4d2SMika Westerberg 186564bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 186664bee4d2SMika Westerberg spi->irq = r.start; 186764bee4d2SMika Westerberg } 186864bee4d2SMika Westerberg 186964bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 187064bee4d2SMika Westerberg return 1; 187164bee4d2SMika Westerberg } 187264bee4d2SMika Westerberg 18738caab75fSGeert Uytterhoeven static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 18747f24467fSOctavian Purdila struct acpi_device *adev) 187564bee4d2SMika Westerberg { 187664bee4d2SMika Westerberg struct list_head resource_list; 187764bee4d2SMika Westerberg struct spi_device *spi; 187864bee4d2SMika Westerberg int ret; 187964bee4d2SMika Westerberg 18807f24467fSOctavian Purdila if (acpi_bus_get_status(adev) || !adev->status.present || 18817f24467fSOctavian Purdila acpi_device_enumerated(adev)) 188264bee4d2SMika Westerberg return AE_OK; 188364bee4d2SMika Westerberg 18848caab75fSGeert Uytterhoeven spi = spi_alloc_device(ctlr); 188564bee4d2SMika Westerberg if (!spi) { 18868caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 188764bee4d2SMika Westerberg dev_name(&adev->dev)); 188864bee4d2SMika Westerberg return AE_NO_MEMORY; 188964bee4d2SMika Westerberg } 189064bee4d2SMika Westerberg 18917b199811SRafael J. 
Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 189264bee4d2SMika Westerberg spi->irq = -1; 189364bee4d2SMika Westerberg 189464bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 189564bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 189664bee4d2SMika Westerberg acpi_spi_add_resource, spi); 189764bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 189864bee4d2SMika Westerberg 18998a2e487eSLukas Wunner acpi_spi_parse_apple_properties(spi); 19008a2e487eSLukas Wunner 190164bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 190264bee4d2SMika Westerberg spi_dev_put(spi); 190364bee4d2SMika Westerberg return AE_OK; 190464bee4d2SMika Westerberg } 190564bee4d2SMika Westerberg 19060c6543f6SDan O'Donovan acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 19070c6543f6SDan O'Donovan sizeof(spi->modalias)); 19080c6543f6SDan O'Donovan 190933ada67dSChristophe RICARD if (spi->irq < 0) 191033ada67dSChristophe RICARD spi->irq = acpi_dev_gpio_irq_get(adev, 0); 191133ada67dSChristophe RICARD 19127f24467fSOctavian Purdila acpi_device_set_enumerated(adev); 19137f24467fSOctavian Purdila 191433cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 191564bee4d2SMika Westerberg if (spi_add_device(spi)) { 191633cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 19178caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 191864bee4d2SMika Westerberg dev_name(&adev->dev)); 191964bee4d2SMika Westerberg spi_dev_put(spi); 192064bee4d2SMika Westerberg } 192164bee4d2SMika Westerberg 192264bee4d2SMika Westerberg return AE_OK; 192364bee4d2SMika Westerberg } 192464bee4d2SMika Westerberg 19257f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 19267f24467fSOctavian Purdila void *data, void **return_value) 19277f24467fSOctavian Purdila { 19288caab75fSGeert Uytterhoeven struct spi_controller *ctlr = data; 19297f24467fSOctavian Purdila struct acpi_device *adev; 19307f24467fSOctavian Purdila 19317f24467fSOctavian Purdila if (acpi_bus_get_device(handle, &adev)) 19327f24467fSOctavian Purdila return AE_OK; 19337f24467fSOctavian Purdila 19348caab75fSGeert Uytterhoeven return acpi_register_spi_device(ctlr, adev); 19357f24467fSOctavian Purdila } 19367f24467fSOctavian Purdila 19378caab75fSGeert Uytterhoeven static void acpi_register_spi_devices(struct spi_controller *ctlr) 193864bee4d2SMika Westerberg { 193964bee4d2SMika Westerberg acpi_status status; 194064bee4d2SMika Westerberg acpi_handle handle; 194164bee4d2SMika Westerberg 19428caab75fSGeert Uytterhoeven handle = ACPI_HANDLE(ctlr->dev.parent); 194364bee4d2SMika Westerberg if (!handle) 194464bee4d2SMika Westerberg return; 194564bee4d2SMika Westerberg 194664bee4d2SMika Westerberg status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 19478caab75fSGeert Uytterhoeven acpi_spi_add_device, NULL, ctlr, NULL); 194864bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 19498caab75fSGeert Uytterhoeven dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 195064bee4d2SMika Westerberg } 195164bee4d2SMika Westerberg #else 19528caab75fSGeert Uytterhoeven static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 195364bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 195464bee4d2SMika Westerberg 19558caab75fSGeert Uytterhoeven static void spi_controller_release(struct device *dev) 19568ae12a0dSDavid Brownell { 19578caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 19588ae12a0dSDavid Brownell 19598caab75fSGeert 
Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 19608caab75fSGeert Uytterhoeven kfree(ctlr); 19618ae12a0dSDavid Brownell } 19628ae12a0dSDavid Brownell 19638ae12a0dSDavid Brownell static struct class spi_master_class = { 19648ae12a0dSDavid Brownell .name = "spi_master", 19658ae12a0dSDavid Brownell .owner = THIS_MODULE, 19668caab75fSGeert Uytterhoeven .dev_release = spi_controller_release, 1967eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 19688ae12a0dSDavid Brownell }; 19698ae12a0dSDavid Brownell 19706c364062SGeert Uytterhoeven #ifdef CONFIG_SPI_SLAVE 19716c364062SGeert Uytterhoeven /** 19726c364062SGeert Uytterhoeven * spi_slave_abort - abort the ongoing transfer request on an SPI slave 19736c364062SGeert Uytterhoeven * controller 19746c364062SGeert Uytterhoeven * @spi: device used for the current transfer 19756c364062SGeert Uytterhoeven */ 19766c364062SGeert Uytterhoeven int spi_slave_abort(struct spi_device *spi) 19776c364062SGeert Uytterhoeven { 19788caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 19796c364062SGeert Uytterhoeven 19808caab75fSGeert Uytterhoeven if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 19818caab75fSGeert Uytterhoeven return ctlr->slave_abort(ctlr); 19826c364062SGeert Uytterhoeven 19836c364062SGeert Uytterhoeven return -ENOTSUPP; 19846c364062SGeert Uytterhoeven } 19856c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_slave_abort); 19866c364062SGeert Uytterhoeven 19876c364062SGeert Uytterhoeven static int match_true(struct device *dev, void *data) 19886c364062SGeert Uytterhoeven { 19896c364062SGeert Uytterhoeven return 1; 19906c364062SGeert Uytterhoeven } 19916c364062SGeert Uytterhoeven 19926c364062SGeert Uytterhoeven static ssize_t spi_slave_show(struct device *dev, 19936c364062SGeert Uytterhoeven struct device_attribute *attr, char *buf) 19946c364062SGeert Uytterhoeven { 19958caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, struct spi_controller, 19968caab75fSGeert Uytterhoeven dev); 19976c364062SGeert Uytterhoeven struct device *child; 19986c364062SGeert Uytterhoeven 19996c364062SGeert Uytterhoeven child = device_find_child(&ctlr->dev, NULL, match_true); 20006c364062SGeert Uytterhoeven return sprintf(buf, "%s\n", 20016c364062SGeert Uytterhoeven child ? 
to_spi_device(child)->modalias : NULL); 20026c364062SGeert Uytterhoeven } 20036c364062SGeert Uytterhoeven 20046c364062SGeert Uytterhoeven static ssize_t spi_slave_store(struct device *dev, 20056c364062SGeert Uytterhoeven struct device_attribute *attr, const char *buf, 20066c364062SGeert Uytterhoeven size_t count) 20076c364062SGeert Uytterhoeven { 20088caab75fSGeert Uytterhoeven struct spi_controller *ctlr = container_of(dev, struct spi_controller, 20098caab75fSGeert Uytterhoeven dev); 20106c364062SGeert Uytterhoeven struct spi_device *spi; 20116c364062SGeert Uytterhoeven struct device *child; 20126c364062SGeert Uytterhoeven char name[32]; 20136c364062SGeert Uytterhoeven int rc; 20146c364062SGeert Uytterhoeven 20156c364062SGeert Uytterhoeven rc = sscanf(buf, "%31s", name); 20166c364062SGeert Uytterhoeven if (rc != 1 || !name[0]) 20176c364062SGeert Uytterhoeven return -EINVAL; 20186c364062SGeert Uytterhoeven 20196c364062SGeert Uytterhoeven child = device_find_child(&ctlr->dev, NULL, match_true); 20206c364062SGeert Uytterhoeven if (child) { 20216c364062SGeert Uytterhoeven /* Remove registered slave */ 20226c364062SGeert Uytterhoeven device_unregister(child); 20236c364062SGeert Uytterhoeven put_device(child); 20246c364062SGeert Uytterhoeven } 20256c364062SGeert Uytterhoeven 20266c364062SGeert Uytterhoeven if (strcmp(name, "(null)")) { 20276c364062SGeert Uytterhoeven /* Register new slave */ 20286c364062SGeert Uytterhoeven spi = spi_alloc_device(ctlr); 20296c364062SGeert Uytterhoeven if (!spi) 20306c364062SGeert Uytterhoeven return -ENOMEM; 20316c364062SGeert Uytterhoeven 20326c364062SGeert Uytterhoeven strlcpy(spi->modalias, name, sizeof(spi->modalias)); 20336c364062SGeert Uytterhoeven 20346c364062SGeert Uytterhoeven rc = spi_add_device(spi); 20356c364062SGeert Uytterhoeven if (rc) { 20366c364062SGeert Uytterhoeven spi_dev_put(spi); 20376c364062SGeert Uytterhoeven return rc; 20386c364062SGeert Uytterhoeven } 20396c364062SGeert Uytterhoeven } 20406c364062SGeert Uytterhoeven 20416c364062SGeert Uytterhoeven return count; 20426c364062SGeert Uytterhoeven } 20436c364062SGeert Uytterhoeven 20446c364062SGeert Uytterhoeven static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store); 20456c364062SGeert Uytterhoeven 20466c364062SGeert Uytterhoeven static struct attribute *spi_slave_attrs[] = { 20476c364062SGeert Uytterhoeven &dev_attr_slave.attr, 20486c364062SGeert Uytterhoeven NULL, 20496c364062SGeert Uytterhoeven }; 20506c364062SGeert Uytterhoeven 20516c364062SGeert Uytterhoeven static const struct attribute_group spi_slave_group = { 20526c364062SGeert Uytterhoeven .attrs = spi_slave_attrs, 20536c364062SGeert Uytterhoeven }; 20546c364062SGeert Uytterhoeven 20556c364062SGeert Uytterhoeven static const struct attribute_group *spi_slave_groups[] = { 20568caab75fSGeert Uytterhoeven &spi_controller_statistics_group, 20576c364062SGeert Uytterhoeven &spi_slave_group, 20586c364062SGeert Uytterhoeven NULL, 20596c364062SGeert Uytterhoeven }; 20606c364062SGeert Uytterhoeven 20616c364062SGeert Uytterhoeven static struct class spi_slave_class = { 20626c364062SGeert Uytterhoeven .name = "spi_slave", 20636c364062SGeert Uytterhoeven .owner = THIS_MODULE, 20648caab75fSGeert Uytterhoeven .dev_release = spi_controller_release, 20656c364062SGeert Uytterhoeven .dev_groups = spi_slave_groups, 20666c364062SGeert Uytterhoeven }; 20676c364062SGeert Uytterhoeven #else 20686c364062SGeert Uytterhoeven extern struct class spi_slave_class; /* dummy */ 20696c364062SGeert Uytterhoeven #endif 20708ae12a0dSDavid Brownell 
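/*
 * With CONFIG_SPI_SLAVE, the "slave" attribute registered above lets
 * userspace bind a slave protocol handler by writing its name (for
 * example "spi-slave-time") to /sys/class/spi_slave/<ctlr>/slave, and
 * unbind it again by writing "(null)"; reading the attribute reports the
 * modalias of the currently bound device, or "(null)" if there is none.
 */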
20718ae12a0dSDavid Brownell /** 20726c364062SGeert Uytterhoeven * __spi_alloc_controller - allocate an SPI master or slave controller 20738ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 207433e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 207549dce689STony Jones * memory is in the driver_data field of the returned device, 20768caab75fSGeert Uytterhoeven * accessible with spi_controller_get_devdata(). 20776c364062SGeert Uytterhoeven * @slave: flag indicating whether to allocate an SPI master (false) or SPI 20786c364062SGeert Uytterhoeven * slave (true) controller 207933e34dc6SDavid Brownell * Context: can sleep 20808ae12a0dSDavid Brownell * 20816c364062SGeert Uytterhoeven * This call is used only by SPI controller drivers, which are the 20828ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 20838caab75fSGeert Uytterhoeven * an spi_controller structure, prior to calling spi_register_controller(). 20848ae12a0dSDavid Brownell * 208597d56dc6SJavier Martinez Canillas * This must be called from context that can sleep. 20868ae12a0dSDavid Brownell * 20876c364062SGeert Uytterhoeven * The caller is responsible for assigning the bus number and initializing the 20888caab75fSGeert Uytterhoeven * controller's methods before calling spi_register_controller(); and (after 20898caab75fSGeert Uytterhoeven * errors adding the device) calling spi_controller_put() to prevent a memory 20908caab75fSGeert Uytterhoeven * leak. 209197d56dc6SJavier Martinez Canillas * 20926c364062SGeert Uytterhoeven * Return: the SPI controller structure on success, else NULL. 20938ae12a0dSDavid Brownell */ 20948caab75fSGeert Uytterhoeven struct spi_controller *__spi_alloc_controller(struct device *dev, 20956c364062SGeert Uytterhoeven unsigned int size, bool slave) 20968ae12a0dSDavid Brownell { 20978caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 20988ae12a0dSDavid Brownell 20990c868461SDavid Brownell if (!dev) 21000c868461SDavid Brownell return NULL; 21010c868461SDavid Brownell 21028caab75fSGeert Uytterhoeven ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL); 21038caab75fSGeert Uytterhoeven if (!ctlr) 21048ae12a0dSDavid Brownell return NULL; 21058ae12a0dSDavid Brownell 21068caab75fSGeert Uytterhoeven device_initialize(&ctlr->dev); 21078caab75fSGeert Uytterhoeven ctlr->bus_num = -1; 21088caab75fSGeert Uytterhoeven ctlr->num_chipselect = 1; 21098caab75fSGeert Uytterhoeven ctlr->slave = slave; 21106c364062SGeert Uytterhoeven if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 21118caab75fSGeert Uytterhoeven ctlr->dev.class = &spi_slave_class; 21126c364062SGeert Uytterhoeven else 21138caab75fSGeert Uytterhoeven ctlr->dev.class = &spi_master_class; 21148caab75fSGeert Uytterhoeven ctlr->dev.parent = dev; 21158caab75fSGeert Uytterhoeven pm_suspend_ignore_children(&ctlr->dev, true); 21168caab75fSGeert Uytterhoeven spi_controller_set_devdata(ctlr, &ctlr[1]); 21178ae12a0dSDavid Brownell 21188caab75fSGeert Uytterhoeven return ctlr; 21198ae12a0dSDavid Brownell } 21206c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(__spi_alloc_controller); 21218ae12a0dSDavid Brownell 212274317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 21238caab75fSGeert Uytterhoeven static int of_spi_register_master(struct spi_controller *ctlr) 212474317984SJean-Christophe PLAGNIOL-VILLARD { 2125e80beb27SGrant Likely int nb, i, *cs; 21268caab75fSGeert Uytterhoeven struct device_node *np = ctlr->dev.of_node; 212774317984SJean-Christophe 
PLAGNIOL-VILLARD 212874317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 212974317984SJean-Christophe PLAGNIOL-VILLARD return 0; 213074317984SJean-Christophe PLAGNIOL-VILLARD 213174317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 21328caab75fSGeert Uytterhoeven ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 213374317984SJean-Christophe PLAGNIOL-VILLARD 21348ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 21358ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 213674317984SJean-Christophe PLAGNIOL-VILLARD return 0; 21378ec5d84eSAndreas Larsson else if (nb < 0) 21388ec5d84eSAndreas Larsson return nb; 213974317984SJean-Christophe PLAGNIOL-VILLARD 2140a86854d0SKees Cook cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 214174317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 21428caab75fSGeert Uytterhoeven ctlr->cs_gpios = cs; 214374317984SJean-Christophe PLAGNIOL-VILLARD 21448caab75fSGeert Uytterhoeven if (!ctlr->cs_gpios) 214574317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 214674317984SJean-Christophe PLAGNIOL-VILLARD 21478caab75fSGeert Uytterhoeven for (i = 0; i < ctlr->num_chipselect; i++) 2148446411e1SAndreas Larsson cs[i] = -ENOENT; 214974317984SJean-Christophe PLAGNIOL-VILLARD 215074317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 215174317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = of_get_named_gpio(np, "cs-gpios", i); 215274317984SJean-Christophe PLAGNIOL-VILLARD 215374317984SJean-Christophe PLAGNIOL-VILLARD return 0; 215474317984SJean-Christophe PLAGNIOL-VILLARD } 215574317984SJean-Christophe PLAGNIOL-VILLARD #else 21568caab75fSGeert Uytterhoeven static int of_spi_register_master(struct spi_controller *ctlr) 215774317984SJean-Christophe PLAGNIOL-VILLARD { 215874317984SJean-Christophe PLAGNIOL-VILLARD return 0; 215974317984SJean-Christophe PLAGNIOL-VILLARD } 216074317984SJean-Christophe PLAGNIOL-VILLARD #endif 216174317984SJean-Christophe PLAGNIOL-VILLARD 2162f3186dd8SLinus Walleij /** 2163f3186dd8SLinus Walleij * spi_get_gpio_descs() - grab chip select GPIOs for the master 2164f3186dd8SLinus Walleij * @ctlr: The SPI master to grab GPIO descriptors for 2165f3186dd8SLinus Walleij */ 2166f3186dd8SLinus Walleij static int spi_get_gpio_descs(struct spi_controller *ctlr) 2167f3186dd8SLinus Walleij { 2168f3186dd8SLinus Walleij int nb, i; 2169f3186dd8SLinus Walleij struct gpio_desc **cs; 2170f3186dd8SLinus Walleij struct device *dev = &ctlr->dev; 2171f3186dd8SLinus Walleij 2172f3186dd8SLinus Walleij nb = gpiod_count(dev, "cs"); 2173f3186dd8SLinus Walleij ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2174f3186dd8SLinus Walleij 2175f3186dd8SLinus Walleij /* No GPIOs at all is fine, else return the error */ 2176f3186dd8SLinus Walleij if (nb == 0 || nb == -ENOENT) 2177f3186dd8SLinus Walleij return 0; 2178f3186dd8SLinus Walleij else if (nb < 0) 2179f3186dd8SLinus Walleij return nb; 2180f3186dd8SLinus Walleij 2181f3186dd8SLinus Walleij cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2182f3186dd8SLinus Walleij GFP_KERNEL); 2183f3186dd8SLinus Walleij if (!cs) 2184f3186dd8SLinus Walleij return -ENOMEM; 2185f3186dd8SLinus Walleij ctlr->cs_gpiods = cs; 2186f3186dd8SLinus Walleij 2187f3186dd8SLinus Walleij for (i = 0; i < nb; i++) { 2188f3186dd8SLinus Walleij /* 2189f3186dd8SLinus Walleij * Most chipselects are active low, the inverted 2190f3186dd8SLinus Walleij * semantics are handled by special quirks in gpiolib, 2191f3186dd8SLinus Walleij * 
so initializing them GPIOD_OUT_LOW here means 2192f3186dd8SLinus Walleij * "unasserted", in most cases this will drive the physical 2193f3186dd8SLinus Walleij * line high. 2194f3186dd8SLinus Walleij */ 2195f3186dd8SLinus Walleij cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2196f3186dd8SLinus Walleij GPIOD_OUT_LOW); 2197f3186dd8SLinus Walleij 2198f3186dd8SLinus Walleij if (cs[i]) { 2199f3186dd8SLinus Walleij /* 2200f3186dd8SLinus Walleij * If we find a CS GPIO, name it after the device and 2201f3186dd8SLinus Walleij * chip select line. 2202f3186dd8SLinus Walleij */ 2203f3186dd8SLinus Walleij char *gpioname; 2204f3186dd8SLinus Walleij 2205f3186dd8SLinus Walleij gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2206f3186dd8SLinus Walleij dev_name(dev), i); 2207f3186dd8SLinus Walleij if (!gpioname) 2208f3186dd8SLinus Walleij return -ENOMEM; 2209f3186dd8SLinus Walleij gpiod_set_consumer_name(cs[i], gpioname); 2210f3186dd8SLinus Walleij } 2211f3186dd8SLinus Walleij } 2212f3186dd8SLinus Walleij 2213f3186dd8SLinus Walleij return 0; 2214f3186dd8SLinus Walleij } 2215f3186dd8SLinus Walleij 2216bdf3a3b5SBoris Brezillon static int spi_controller_check_ops(struct spi_controller *ctlr) 2217bdf3a3b5SBoris Brezillon { 2218bdf3a3b5SBoris Brezillon /* 2219b5932f5cSBoris Brezillon * The controller may implement only the high-level SPI-memory like 2220b5932f5cSBoris Brezillon * operations if it does not support regular SPI transfers, and this is 2221b5932f5cSBoris Brezillon * valid use case. 2222b5932f5cSBoris Brezillon * If ->mem_ops is NULL, we request that at least one of the 2223b5932f5cSBoris Brezillon * ->transfer_xxx() method be implemented. 2224bdf3a3b5SBoris Brezillon */ 2225b5932f5cSBoris Brezillon if (ctlr->mem_ops) { 2226b5932f5cSBoris Brezillon if (!ctlr->mem_ops->exec_op) 2227bdf3a3b5SBoris Brezillon return -EINVAL; 2228b5932f5cSBoris Brezillon } else if (!ctlr->transfer && !ctlr->transfer_one && 2229b5932f5cSBoris Brezillon !ctlr->transfer_one_message) { 2230b5932f5cSBoris Brezillon return -EINVAL; 2231b5932f5cSBoris Brezillon } 2232bdf3a3b5SBoris Brezillon 2233bdf3a3b5SBoris Brezillon return 0; 2234bdf3a3b5SBoris Brezillon } 2235bdf3a3b5SBoris Brezillon 22368ae12a0dSDavid Brownell /** 22378caab75fSGeert Uytterhoeven * spi_register_controller - register SPI master or slave controller 22388caab75fSGeert Uytterhoeven * @ctlr: initialized master, originally from spi_alloc_master() or 22398caab75fSGeert Uytterhoeven * spi_alloc_slave() 224033e34dc6SDavid Brownell * Context: can sleep 22418ae12a0dSDavid Brownell * 22428caab75fSGeert Uytterhoeven * SPI controllers connect to their drivers using some non-SPI bus, 22438ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 22448caab75fSGeert Uytterhoeven * includes calling spi_register_controller() to hook up to this SPI bus glue. 22458ae12a0dSDavid Brownell * 22468ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 22478ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 22488ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 22498ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 22508ae12a0dSDavid Brownell * chip is at which address. 22518ae12a0dSDavid Brownell * 22528ae12a0dSDavid Brownell * This must be called from context that can sleep. 
It returns zero on 22538caab75fSGeert Uytterhoeven * success, else a negative error code (dropping the controller's refcount). 22540c868461SDavid Brownell * After a successful return, the caller is responsible for calling 22558caab75fSGeert Uytterhoeven * spi_unregister_controller(). 225697d56dc6SJavier Martinez Canillas * 225797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 22588ae12a0dSDavid Brownell */ 22598caab75fSGeert Uytterhoeven int spi_register_controller(struct spi_controller *ctlr) 22608ae12a0dSDavid Brownell { 22618caab75fSGeert Uytterhoeven struct device *dev = ctlr->dev.parent; 22622b9603a0SFeng Tang struct boardinfo *bi; 22638ae12a0dSDavid Brownell int status = -ENODEV; 226442bdd706SLucas Stach int id, first_dynamic; 22658ae12a0dSDavid Brownell 22660c868461SDavid Brownell if (!dev) 22670c868461SDavid Brownell return -ENODEV; 22680c868461SDavid Brownell 2269bdf3a3b5SBoris Brezillon /* 2270bdf3a3b5SBoris Brezillon * Make sure all necessary hooks are implemented before registering 2271bdf3a3b5SBoris Brezillon * the SPI controller. 2272bdf3a3b5SBoris Brezillon */ 2273bdf3a3b5SBoris Brezillon status = spi_controller_check_ops(ctlr); 2274bdf3a3b5SBoris Brezillon if (status) 2275bdf3a3b5SBoris Brezillon return status; 2276bdf3a3b5SBoris Brezillon 22778caab75fSGeert Uytterhoeven if (!spi_controller_is_slave(ctlr)) { 2278f3186dd8SLinus Walleij if (ctlr->use_gpio_descriptors) { 2279f3186dd8SLinus Walleij status = spi_get_gpio_descs(ctlr); 2280f3186dd8SLinus Walleij if (status) 2281f3186dd8SLinus Walleij return status; 2282f3186dd8SLinus Walleij } else { 2283f3186dd8SLinus Walleij /* Legacy code path for GPIOs from DT */ 22848caab75fSGeert Uytterhoeven status = of_spi_register_master(ctlr); 228574317984SJean-Christophe PLAGNIOL-VILLARD if (status) 228674317984SJean-Christophe PLAGNIOL-VILLARD return status; 22876c364062SGeert Uytterhoeven } 2288f3186dd8SLinus Walleij } 228974317984SJean-Christophe PLAGNIOL-VILLARD 2290082c8cb4SDavid Brownell /* even if it's just one always-selected device, there must 2291082c8cb4SDavid Brownell * be at least one chipselect 2292082c8cb4SDavid Brownell */ 22938caab75fSGeert Uytterhoeven if (ctlr->num_chipselect == 0) 2294082c8cb4SDavid Brownell return -EINVAL; 229504b2d03aSGeert Uytterhoeven if (ctlr->bus_num >= 0) { 229604b2d03aSGeert Uytterhoeven /* devices with a fixed bus num must check-in with the num */ 229704b2d03aSGeert Uytterhoeven mutex_lock(&board_lock); 229804b2d03aSGeert Uytterhoeven id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 229904b2d03aSGeert Uytterhoeven ctlr->bus_num + 1, GFP_KERNEL); 230004b2d03aSGeert Uytterhoeven mutex_unlock(&board_lock); 230104b2d03aSGeert Uytterhoeven if (WARN(id < 0, "couldn't get idr")) 230204b2d03aSGeert Uytterhoeven return id == -ENOSPC ? -EBUSY : id; 230304b2d03aSGeert Uytterhoeven ctlr->bus_num = id; 230404b2d03aSGeert Uytterhoeven } else if (ctlr->dev.of_node) { 23059b61e302SSuniel Mahesh /* allocate dynamic bus number using Linux idr */ 23069b61e302SSuniel Mahesh id = of_alias_get_id(ctlr->dev.of_node, "spi"); 23079b61e302SSuniel Mahesh if (id >= 0) { 23089b61e302SSuniel Mahesh ctlr->bus_num = id; 23099b61e302SSuniel Mahesh mutex_lock(&board_lock); 23109b61e302SSuniel Mahesh id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 23119b61e302SSuniel Mahesh ctlr->bus_num + 1, GFP_KERNEL); 23129b61e302SSuniel Mahesh mutex_unlock(&board_lock); 23139b61e302SSuniel Mahesh if (WARN(id < 0, "couldn't get idr")) 23149b61e302SSuniel Mahesh return id == -ENOSPC ? 
-EBUSY : id; 23159b61e302SSuniel Mahesh } 23169b61e302SSuniel Mahesh } 23178caab75fSGeert Uytterhoeven if (ctlr->bus_num < 0) { 231842bdd706SLucas Stach first_dynamic = of_alias_get_highest_id("spi"); 231942bdd706SLucas Stach if (first_dynamic < 0) 232042bdd706SLucas Stach first_dynamic = 0; 232142bdd706SLucas Stach else 232242bdd706SLucas Stach first_dynamic++; 232342bdd706SLucas Stach 23249b61e302SSuniel Mahesh mutex_lock(&board_lock); 232542bdd706SLucas Stach id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 232642bdd706SLucas Stach 0, GFP_KERNEL); 23279b61e302SSuniel Mahesh mutex_unlock(&board_lock); 23289b61e302SSuniel Mahesh if (WARN(id < 0, "couldn't get idr")) 23299b61e302SSuniel Mahesh return id; 23309b61e302SSuniel Mahesh ctlr->bus_num = id; 23318ae12a0dSDavid Brownell } 23328caab75fSGeert Uytterhoeven INIT_LIST_HEAD(&ctlr->queue); 23338caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->queue_lock); 23348caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->bus_lock_spinlock); 23358caab75fSGeert Uytterhoeven mutex_init(&ctlr->bus_lock_mutex); 23368caab75fSGeert Uytterhoeven mutex_init(&ctlr->io_mutex); 23378caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 0; 23388caab75fSGeert Uytterhoeven init_completion(&ctlr->xfer_completion); 23398caab75fSGeert Uytterhoeven if (!ctlr->max_dma_len) 23408caab75fSGeert Uytterhoeven ctlr->max_dma_len = INT_MAX; 2341cf32b71eSErnst Schwab 23428ae12a0dSDavid Brownell /* register the device, then userspace will see it. 23438ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 23448ae12a0dSDavid Brownell */ 23458caab75fSGeert Uytterhoeven dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 23468caab75fSGeert Uytterhoeven status = device_add(&ctlr->dev); 23479b61e302SSuniel Mahesh if (status < 0) { 23489b61e302SSuniel Mahesh /* free bus id */ 23499b61e302SSuniel Mahesh mutex_lock(&board_lock); 23509b61e302SSuniel Mahesh idr_remove(&spi_master_idr, ctlr->bus_num); 23519b61e302SSuniel Mahesh mutex_unlock(&board_lock); 23528ae12a0dSDavid Brownell goto done; 23539b61e302SSuniel Mahesh } 23549b61e302SSuniel Mahesh dev_dbg(dev, "registered %s %s\n", 23558caab75fSGeert Uytterhoeven spi_controller_is_slave(ctlr) ? "slave" : "master", 23569b61e302SSuniel Mahesh dev_name(&ctlr->dev)); 23578ae12a0dSDavid Brownell 2358b5932f5cSBoris Brezillon /* 2359b5932f5cSBoris Brezillon * If we're using a queued driver, start the queue. Note that we don't 2360b5932f5cSBoris Brezillon * need the queueing logic if the driver is only supporting high-level 2361b5932f5cSBoris Brezillon * memory operations. 
2362b5932f5cSBoris Brezillon */ 2363b5932f5cSBoris Brezillon if (ctlr->transfer) { 23648caab75fSGeert Uytterhoeven dev_info(dev, "controller is unqueued, this is deprecated\n"); 2365b5932f5cSBoris Brezillon } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 23668caab75fSGeert Uytterhoeven status = spi_controller_initialize_queue(ctlr); 2367ffbbdd21SLinus Walleij if (status) { 23688caab75fSGeert Uytterhoeven device_del(&ctlr->dev); 23699b61e302SSuniel Mahesh /* free bus id */ 23709b61e302SSuniel Mahesh mutex_lock(&board_lock); 23719b61e302SSuniel Mahesh idr_remove(&spi_master_idr, ctlr->bus_num); 23729b61e302SSuniel Mahesh mutex_unlock(&board_lock); 2373ffbbdd21SLinus Walleij goto done; 2374ffbbdd21SLinus Walleij } 2375ffbbdd21SLinus Walleij } 2376eca2ebc7SMartin Sperl /* add statistics */ 23778caab75fSGeert Uytterhoeven spin_lock_init(&ctlr->statistics.lock); 2378ffbbdd21SLinus Walleij 23792b9603a0SFeng Tang mutex_lock(&board_lock); 23808caab75fSGeert Uytterhoeven list_add_tail(&ctlr->list, &spi_controller_list); 23812b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 23828caab75fSGeert Uytterhoeven spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 23832b9603a0SFeng Tang mutex_unlock(&board_lock); 23842b9603a0SFeng Tang 238564bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 23868caab75fSGeert Uytterhoeven of_register_spi_devices(ctlr); 23878caab75fSGeert Uytterhoeven acpi_register_spi_devices(ctlr); 23888ae12a0dSDavid Brownell done: 23898ae12a0dSDavid Brownell return status; 23908ae12a0dSDavid Brownell } 23918caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_register_controller); 23928ae12a0dSDavid Brownell 2393666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 2394666d5b4cSMark Brown { 23958caab75fSGeert Uytterhoeven spi_unregister_controller(*(struct spi_controller **)res); 2396666d5b4cSMark Brown } 2397666d5b4cSMark Brown 2398666d5b4cSMark Brown /** 23998caab75fSGeert Uytterhoeven * devm_spi_register_controller - register managed SPI master or slave 24008caab75fSGeert Uytterhoeven * controller 24018caab75fSGeert Uytterhoeven * @dev: device managing SPI controller 24028caab75fSGeert Uytterhoeven * @ctlr: initialized controller, originally from spi_alloc_master() or 24038caab75fSGeert Uytterhoeven * spi_alloc_slave() 2404666d5b4cSMark Brown * Context: can sleep 2405666d5b4cSMark Brown * 24068caab75fSGeert Uytterhoeven * Register a SPI device as with spi_register_controller() which will 240768b892f1SJohan Hovold * automatically be unregistered and freed. 240897d56dc6SJavier Martinez Canillas * 240997d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
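 *
 * As an illustrative sketch only (the "foo" names and the private struct
 * below are hypothetical, not defined in this file), a platform driver
 * probe() might use the managed registration like this:
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 1;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 *
 * With this pattern the unregister call happens automatically when the
 * driver detaches, so no explicit cleanup is needed in remove().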
2410666d5b4cSMark Brown */ 24118caab75fSGeert Uytterhoeven int devm_spi_register_controller(struct device *dev, 24128caab75fSGeert Uytterhoeven struct spi_controller *ctlr) 2413666d5b4cSMark Brown { 24148caab75fSGeert Uytterhoeven struct spi_controller **ptr; 2415666d5b4cSMark Brown int ret; 2416666d5b4cSMark Brown 2417666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2418666d5b4cSMark Brown if (!ptr) 2419666d5b4cSMark Brown return -ENOMEM; 2420666d5b4cSMark Brown 24218caab75fSGeert Uytterhoeven ret = spi_register_controller(ctlr); 24224b92894eSStephen Warren if (!ret) { 24238caab75fSGeert Uytterhoeven *ptr = ctlr; 2424666d5b4cSMark Brown devres_add(dev, ptr); 2425666d5b4cSMark Brown } else { 2426666d5b4cSMark Brown devres_free(ptr); 2427666d5b4cSMark Brown } 2428666d5b4cSMark Brown 2429666d5b4cSMark Brown return ret; 2430666d5b4cSMark Brown } 24318caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2432666d5b4cSMark Brown 243334860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 24348ae12a0dSDavid Brownell { 24350c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 24368ae12a0dSDavid Brownell return 0; 24378ae12a0dSDavid Brownell } 24388ae12a0dSDavid Brownell 24398ae12a0dSDavid Brownell /** 24408caab75fSGeert Uytterhoeven * spi_unregister_controller - unregister SPI master or slave controller 24418caab75fSGeert Uytterhoeven * @ctlr: the controller being unregistered 244233e34dc6SDavid Brownell * Context: can sleep 24438ae12a0dSDavid Brownell * 24448caab75fSGeert Uytterhoeven * This call is used only by SPI controller drivers, which are the 24458ae12a0dSDavid Brownell * only ones directly touching chip registers. 24468ae12a0dSDavid Brownell * 24478ae12a0dSDavid Brownell * This must be called from context that can sleep. 244868b892f1SJohan Hovold * 244968b892f1SJohan Hovold * Note that this function also drops a reference to the controller. 
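 *
 * As a sketch (the "foo" names are hypothetical), a driver that registered
 * with spi_register_controller() would typically call this from its
 * remove() path:
 *
 *	static int foo_spi_remove(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr = platform_get_drvdata(pdev);
 *
 *		spi_unregister_controller(ctlr);
 *		return 0;
 *	}
 *
 * Drivers that registered with devm_spi_register_controller() should not
 * call this again themselves, since the managed release path already
 * unregisters the controller for them.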
24508ae12a0dSDavid Brownell */ 24518caab75fSGeert Uytterhoeven void spi_unregister_controller(struct spi_controller *ctlr) 24528ae12a0dSDavid Brownell { 24539b61e302SSuniel Mahesh struct spi_controller *found; 245467f7b278SJohan Hovold int id = ctlr->bus_num; 245589fc9a1aSJeff Garzik int dummy; 245689fc9a1aSJeff Garzik 24579b61e302SSuniel Mahesh /* First make sure that this controller was ever added */ 24589b61e302SSuniel Mahesh mutex_lock(&board_lock); 245967f7b278SJohan Hovold found = idr_find(&spi_master_idr, id); 24609b61e302SSuniel Mahesh mutex_unlock(&board_lock); 24618caab75fSGeert Uytterhoeven if (ctlr->queued) { 24628caab75fSGeert Uytterhoeven if (spi_destroy_queue(ctlr)) 24638caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue remove failed\n"); 2464ffbbdd21SLinus Walleij } 24652b9603a0SFeng Tang mutex_lock(&board_lock); 24668caab75fSGeert Uytterhoeven list_del(&ctlr->list); 24672b9603a0SFeng Tang mutex_unlock(&board_lock); 24682b9603a0SFeng Tang 24698caab75fSGeert Uytterhoeven dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); 24708caab75fSGeert Uytterhoeven device_unregister(&ctlr->dev); 24719b61e302SSuniel Mahesh /* free bus id */ 24729b61e302SSuniel Mahesh mutex_lock(&board_lock); 2473613bd1eaSJarkko Nikula if (found == ctlr) 247467f7b278SJohan Hovold idr_remove(&spi_master_idr, id); 24759b61e302SSuniel Mahesh mutex_unlock(&board_lock); 24768ae12a0dSDavid Brownell } 24778caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_controller); 24788ae12a0dSDavid Brownell 24798caab75fSGeert Uytterhoeven int spi_controller_suspend(struct spi_controller *ctlr) 2480ffbbdd21SLinus Walleij { 2481ffbbdd21SLinus Walleij int ret; 2482ffbbdd21SLinus Walleij 24838caab75fSGeert Uytterhoeven /* Basically no-ops for non-queued controllers */ 24848caab75fSGeert Uytterhoeven if (!ctlr->queued) 2485ffbbdd21SLinus Walleij return 0; 2486ffbbdd21SLinus Walleij 24878caab75fSGeert Uytterhoeven ret = spi_stop_queue(ctlr); 2488ffbbdd21SLinus Walleij if (ret) 24898caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue stop failed\n"); 2490ffbbdd21SLinus Walleij 2491ffbbdd21SLinus Walleij return ret; 2492ffbbdd21SLinus Walleij } 24938caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_suspend); 2494ffbbdd21SLinus Walleij 24958caab75fSGeert Uytterhoeven int spi_controller_resume(struct spi_controller *ctlr) 2496ffbbdd21SLinus Walleij { 2497ffbbdd21SLinus Walleij int ret; 2498ffbbdd21SLinus Walleij 24998caab75fSGeert Uytterhoeven if (!ctlr->queued) 2500ffbbdd21SLinus Walleij return 0; 2501ffbbdd21SLinus Walleij 25028caab75fSGeert Uytterhoeven ret = spi_start_queue(ctlr); 2503ffbbdd21SLinus Walleij if (ret) 25048caab75fSGeert Uytterhoeven dev_err(&ctlr->dev, "queue restart failed\n"); 2505ffbbdd21SLinus Walleij 2506ffbbdd21SLinus Walleij return ret; 2507ffbbdd21SLinus Walleij } 25088caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_resume); 2509ffbbdd21SLinus Walleij 25108caab75fSGeert Uytterhoeven static int __spi_controller_match(struct device *dev, const void *data) 25115ed2c832SDave Young { 25128caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 25139f3b795aSMichał Mirosław const u16 *bus_num = data; 25145ed2c832SDave Young 25158caab75fSGeert Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 25168caab75fSGeert Uytterhoeven return ctlr->bus_num == *bus_num; 25175ed2c832SDave Young } 25185ed2c832SDave Young 25198ae12a0dSDavid Brownell /** 25208ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 25218ae12a0dSDavid 
Brownell * @bus_num: the master's bus number 252233e34dc6SDavid Brownell * Context: can sleep 25238ae12a0dSDavid Brownell * 25248ae12a0dSDavid Brownell * This call may be used with devices that are registered after 25258ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 25268caab75fSGeert Uytterhoeven * spi_controller (which the caller must release), or NULL if there is 25278ae12a0dSDavid Brownell * no such master registered. 252897d56dc6SJavier Martinez Canillas * 252997d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 25308ae12a0dSDavid Brownell */ 25318caab75fSGeert Uytterhoeven struct spi_controller *spi_busnum_to_master(u16 bus_num) 25328ae12a0dSDavid Brownell { 253349dce689STony Jones struct device *dev; 25348caab75fSGeert Uytterhoeven struct spi_controller *ctlr = NULL; 25358ae12a0dSDavid Brownell 2536695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 25378caab75fSGeert Uytterhoeven __spi_controller_match); 25385ed2c832SDave Young if (dev) 25398caab75fSGeert Uytterhoeven ctlr = container_of(dev, struct spi_controller, dev); 25405ed2c832SDave Young /* reference got in class_find_device */ 25418caab75fSGeert Uytterhoeven return ctlr; 25428ae12a0dSDavid Brownell } 25438ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 25448ae12a0dSDavid Brownell 2545d780c371SMartin Sperl /*-------------------------------------------------------------------------*/ 2546d780c371SMartin Sperl 2547d780c371SMartin Sperl /* Core methods for SPI resource management */ 2548d780c371SMartin Sperl 2549d780c371SMartin Sperl /** 2550d780c371SMartin Sperl * spi_res_alloc - allocate a spi resource that is life-cycle managed 2551d780c371SMartin Sperl * during the processing of a spi_message while using 2552d780c371SMartin Sperl * spi_transfer_one 2553d780c371SMartin Sperl * @spi: the spi device for which we allocate memory 2554d780c371SMartin Sperl * @release: the release code to execute for this resource 2555d780c371SMartin Sperl * @size: size to alloc and return 2556d780c371SMartin Sperl * @gfp: GFP allocation flags 2557d780c371SMartin Sperl * 2558d780c371SMartin Sperl * Return: the pointer to the allocated data 2559d780c371SMartin Sperl * 2560d780c371SMartin Sperl * This may get enhanced in the future to allocate from a memory pool 25618caab75fSGeert Uytterhoeven * of the @spi_device or @spi_controller to avoid repeated allocations. 
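 *
 * A minimal usage sketch (the payload struct, release callback and buffer
 * below are hypothetical examples, not part of this file):
 *
 *	struct foo_res {
 *		void *scratch;
 *	};
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		struct foo_res *r = res;
 *
 *		kfree(r->scratch);
 *	}
 *
 *	struct foo_res *r = spi_res_alloc(spi, foo_res_release,
 *					  sizeof(*r), GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	r->scratch = some_buffer;
 *	spi_res_add(msg, r);
 *
 * The release callback then runs from spi_res_release() once the message
 * has been processed.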
2562d780c371SMartin Sperl */ 2563d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi, 2564d780c371SMartin Sperl spi_res_release_t release, 2565d780c371SMartin Sperl size_t size, gfp_t gfp) 2566d780c371SMartin Sperl { 2567d780c371SMartin Sperl struct spi_res *sres; 2568d780c371SMartin Sperl 2569d780c371SMartin Sperl sres = kzalloc(sizeof(*sres) + size, gfp); 2570d780c371SMartin Sperl if (!sres) 2571d780c371SMartin Sperl return NULL; 2572d780c371SMartin Sperl 2573d780c371SMartin Sperl INIT_LIST_HEAD(&sres->entry); 2574d780c371SMartin Sperl sres->release = release; 2575d780c371SMartin Sperl 2576d780c371SMartin Sperl return sres->data; 2577d780c371SMartin Sperl } 2578d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc); 2579d780c371SMartin Sperl 2580d780c371SMartin Sperl /** 2581d780c371SMartin Sperl * spi_res_free - free an spi resource 2582d780c371SMartin Sperl * @res: pointer to the custom data of a resource 2583d780c371SMartin Sperl * 2584d780c371SMartin Sperl */ 2585d780c371SMartin Sperl void spi_res_free(void *res) 2586d780c371SMartin Sperl { 2587d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2588d780c371SMartin Sperl 2589d780c371SMartin Sperl if (!res) 2590d780c371SMartin Sperl return; 2591d780c371SMartin Sperl 2592d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2593d780c371SMartin Sperl kfree(sres); 2594d780c371SMartin Sperl } 2595d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free); 2596d780c371SMartin Sperl 2597d780c371SMartin Sperl /** 2598d780c371SMartin Sperl * spi_res_add - add a spi_res to the spi_message 2599d780c371SMartin Sperl * @message: the spi message 2600d780c371SMartin Sperl * @res: the spi_resource 2601d780c371SMartin Sperl */ 2602d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res) 2603d780c371SMartin Sperl { 2604d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2605d780c371SMartin Sperl 2606d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2607d780c371SMartin Sperl list_add_tail(&sres->entry, &message->resources); 2608d780c371SMartin Sperl } 2609d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add); 2610d780c371SMartin Sperl 2611d780c371SMartin Sperl /** 2612d780c371SMartin Sperl * spi_res_release - release all spi resources for this message 26138caab75fSGeert Uytterhoeven * @ctlr: the @spi_controller 2614d780c371SMartin Sperl * @message: the @spi_message 2615d780c371SMartin Sperl */ 26168caab75fSGeert Uytterhoeven void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 2617d780c371SMartin Sperl { 2618d780c371SMartin Sperl struct spi_res *res; 2619d780c371SMartin Sperl 2620d780c371SMartin Sperl while (!list_empty(&message->resources)) { 2621d780c371SMartin Sperl res = list_last_entry(&message->resources, 2622d780c371SMartin Sperl struct spi_res, entry); 2623d780c371SMartin Sperl 2624d780c371SMartin Sperl if (res->release) 26258caab75fSGeert Uytterhoeven res->release(ctlr, message, res->data); 2626d780c371SMartin Sperl 2627d780c371SMartin Sperl list_del(&res->entry); 2628d780c371SMartin Sperl 2629d780c371SMartin Sperl kfree(res); 2630d780c371SMartin Sperl } 2631d780c371SMartin Sperl } 2632d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release); 26338ae12a0dSDavid Brownell 26348ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 26358ae12a0dSDavid Brownell 2636523baf5aSMartin Sperl /* Core methods for spi_message alterations */ 2637523baf5aSMartin Sperl 
26388caab75fSGeert Uytterhoeven static void __spi_replace_transfers_release(struct spi_controller *ctlr, 2639523baf5aSMartin Sperl struct spi_message *msg, 2640523baf5aSMartin Sperl void *res) 2641523baf5aSMartin Sperl { 2642523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer = res; 2643523baf5aSMartin Sperl size_t i; 2644523baf5aSMartin Sperl 2645523baf5aSMartin Sperl /* call extra callback if requested */ 2646523baf5aSMartin Sperl if (rxfer->release) 26478caab75fSGeert Uytterhoeven rxfer->release(ctlr, msg, res); 2648523baf5aSMartin Sperl 2649523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2650523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2651523baf5aSMartin Sperl 2652523baf5aSMartin Sperl /* remove the formerly inserted entries */ 2653523baf5aSMartin Sperl for (i = 0; i < rxfer->inserted; i++) 2654523baf5aSMartin Sperl list_del(&rxfer->inserted_transfers[i].transfer_list); 2655523baf5aSMartin Sperl } 2656523baf5aSMartin Sperl 2657523baf5aSMartin Sperl /** 2658523baf5aSMartin Sperl * spi_replace_transfers - replace transfers with several transfers 2659523baf5aSMartin Sperl * and register change with spi_message.resources 2660523baf5aSMartin Sperl * @msg: the spi_message we work upon 2661523baf5aSMartin Sperl * @xfer_first: the first spi_transfer we want to replace 2662523baf5aSMartin Sperl * @remove: number of transfers to remove 2663523baf5aSMartin Sperl * @insert: the number of transfers we want to insert instead 2664523baf5aSMartin Sperl * @release: extra release code necessary in some circumstances 2665523baf5aSMartin Sperl * @extradatasize: extra data to allocate (with alignment guarantees 2666523baf5aSMartin Sperl * of struct @spi_transfer) 266705885397SMartin Sperl * @gfp: gfp flags 2668523baf5aSMartin Sperl * 2669523baf5aSMartin Sperl * Returns: pointer to @spi_replaced_transfers, 2670523baf5aSMartin Sperl * PTR_ERR(...) in case of errors. 
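 *
 * For example (sketch only, with error handling trimmed), replacing one
 * transfer in @msg with two copies of it looks like:
 *
 *	struct spi_replaced_transfers *srt;
 *
 *	srt = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(srt))
 *		return PTR_ERR(srt);
 *
 * The entries in srt->inserted_transfers start out as copies of *xfer, so
 * the caller still has to fix up their len and buffer pointers, as
 * __spi_split_transfer_maxsize() below does.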
2671523baf5aSMartin Sperl */ 2672523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers( 2673523baf5aSMartin Sperl struct spi_message *msg, 2674523baf5aSMartin Sperl struct spi_transfer *xfer_first, 2675523baf5aSMartin Sperl size_t remove, 2676523baf5aSMartin Sperl size_t insert, 2677523baf5aSMartin Sperl spi_replaced_release_t release, 2678523baf5aSMartin Sperl size_t extradatasize, 2679523baf5aSMartin Sperl gfp_t gfp) 2680523baf5aSMartin Sperl { 2681523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer; 2682523baf5aSMartin Sperl struct spi_transfer *xfer; 2683523baf5aSMartin Sperl size_t i; 2684523baf5aSMartin Sperl 2685523baf5aSMartin Sperl /* allocate the structure using spi_res */ 2686523baf5aSMartin Sperl rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2687523baf5aSMartin Sperl insert * sizeof(struct spi_transfer) 2688523baf5aSMartin Sperl + sizeof(struct spi_replaced_transfers) 2689523baf5aSMartin Sperl + extradatasize, 2690523baf5aSMartin Sperl gfp); 2691523baf5aSMartin Sperl if (!rxfer) 2692523baf5aSMartin Sperl return ERR_PTR(-ENOMEM); 2693523baf5aSMartin Sperl 2694523baf5aSMartin Sperl /* the release code to invoke before running the generic release */ 2695523baf5aSMartin Sperl rxfer->release = release; 2696523baf5aSMartin Sperl 2697523baf5aSMartin Sperl /* assign extradata */ 2698523baf5aSMartin Sperl if (extradatasize) 2699523baf5aSMartin Sperl rxfer->extradata = 2700523baf5aSMartin Sperl &rxfer->inserted_transfers[insert]; 2701523baf5aSMartin Sperl 2702523baf5aSMartin Sperl /* init the replaced_transfers list */ 2703523baf5aSMartin Sperl INIT_LIST_HEAD(&rxfer->replaced_transfers); 2704523baf5aSMartin Sperl 2705523baf5aSMartin Sperl /* assign the list_entry after which we should reinsert 2706523baf5aSMartin Sperl * the @replaced_transfers - it may be spi_message.messages! 
2707523baf5aSMartin Sperl */ 2708523baf5aSMartin Sperl rxfer->replaced_after = xfer_first->transfer_list.prev; 2709523baf5aSMartin Sperl 2710523baf5aSMartin Sperl /* remove the requested number of transfers */ 2711523baf5aSMartin Sperl for (i = 0; i < remove; i++) { 2712523baf5aSMartin Sperl /* if the entry after replaced_after it is msg->transfers 2713523baf5aSMartin Sperl * then we have been requested to remove more transfers 2714523baf5aSMartin Sperl * than are in the list 2715523baf5aSMartin Sperl */ 2716523baf5aSMartin Sperl if (rxfer->replaced_after->next == &msg->transfers) { 2717523baf5aSMartin Sperl dev_err(&msg->spi->dev, 2718523baf5aSMartin Sperl "requested to remove more spi_transfers than are available\n"); 2719523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2720523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, 2721523baf5aSMartin Sperl rxfer->replaced_after); 2722523baf5aSMartin Sperl 2723523baf5aSMartin Sperl /* free the spi_replace_transfer structure */ 2724523baf5aSMartin Sperl spi_res_free(rxfer); 2725523baf5aSMartin Sperl 2726523baf5aSMartin Sperl /* and return with an error */ 2727523baf5aSMartin Sperl return ERR_PTR(-EINVAL); 2728523baf5aSMartin Sperl } 2729523baf5aSMartin Sperl 2730523baf5aSMartin Sperl /* remove the entry after replaced_after from list of 2731523baf5aSMartin Sperl * transfers and add it to list of replaced_transfers 2732523baf5aSMartin Sperl */ 2733523baf5aSMartin Sperl list_move_tail(rxfer->replaced_after->next, 2734523baf5aSMartin Sperl &rxfer->replaced_transfers); 2735523baf5aSMartin Sperl } 2736523baf5aSMartin Sperl 2737523baf5aSMartin Sperl /* create copy of the given xfer with identical settings 2738523baf5aSMartin Sperl * based on the first transfer to get removed 2739523baf5aSMartin Sperl */ 2740523baf5aSMartin Sperl for (i = 0; i < insert; i++) { 2741523baf5aSMartin Sperl /* we need to run in reverse order */ 2742523baf5aSMartin Sperl xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2743523baf5aSMartin Sperl 2744523baf5aSMartin Sperl /* copy all spi_transfer data */ 2745523baf5aSMartin Sperl memcpy(xfer, xfer_first, sizeof(*xfer)); 2746523baf5aSMartin Sperl 2747523baf5aSMartin Sperl /* add to list */ 2748523baf5aSMartin Sperl list_add(&xfer->transfer_list, rxfer->replaced_after); 2749523baf5aSMartin Sperl 2750523baf5aSMartin Sperl /* clear cs_change and delay_usecs for all but the last */ 2751523baf5aSMartin Sperl if (i) { 2752523baf5aSMartin Sperl xfer->cs_change = false; 2753523baf5aSMartin Sperl xfer->delay_usecs = 0; 2754523baf5aSMartin Sperl } 2755523baf5aSMartin Sperl } 2756523baf5aSMartin Sperl 2757523baf5aSMartin Sperl /* set up inserted */ 2758523baf5aSMartin Sperl rxfer->inserted = insert; 2759523baf5aSMartin Sperl 2760523baf5aSMartin Sperl /* and register it with spi_res/spi_message */ 2761523baf5aSMartin Sperl spi_res_add(msg, rxfer); 2762523baf5aSMartin Sperl 2763523baf5aSMartin Sperl return rxfer; 2764523baf5aSMartin Sperl } 2765523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers); 2766523baf5aSMartin Sperl 27678caab75fSGeert Uytterhoeven static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 2768d9f12122SMartin Sperl struct spi_message *msg, 2769d9f12122SMartin Sperl struct spi_transfer **xferp, 2770d9f12122SMartin Sperl size_t maxsize, 2771d9f12122SMartin Sperl gfp_t gfp) 2772d9f12122SMartin Sperl { 2773d9f12122SMartin Sperl struct spi_transfer *xfer = *xferp, *xfers; 2774d9f12122SMartin Sperl struct spi_replaced_transfers *srt; 2775d9f12122SMartin Sperl size_t 
offset; 2776d9f12122SMartin Sperl size_t count, i; 2777d9f12122SMartin Sperl 2778d9f12122SMartin Sperl /* warn once about this fact that we are splitting a transfer */ 2779d9f12122SMartin Sperl dev_warn_once(&msg->spi->dev, 27807d62f51eSFabio Estevam "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2781d9f12122SMartin Sperl xfer->len, maxsize); 2782d9f12122SMartin Sperl 2783d9f12122SMartin Sperl /* calculate how many we have to replace */ 2784d9f12122SMartin Sperl count = DIV_ROUND_UP(xfer->len, maxsize); 2785d9f12122SMartin Sperl 2786d9f12122SMartin Sperl /* create replacement */ 2787d9f12122SMartin Sperl srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2788657d32efSDan Carpenter if (IS_ERR(srt)) 2789657d32efSDan Carpenter return PTR_ERR(srt); 2790d9f12122SMartin Sperl xfers = srt->inserted_transfers; 2791d9f12122SMartin Sperl 2792d9f12122SMartin Sperl /* now handle each of those newly inserted spi_transfers 2793d9f12122SMartin Sperl * note that the replacements spi_transfers all are preset 2794d9f12122SMartin Sperl * to the same values as *xferp, so tx_buf, rx_buf and len 2795d9f12122SMartin Sperl * are all identical (as well as most others) 2796d9f12122SMartin Sperl * so we just have to fix up len and the pointers. 2797d9f12122SMartin Sperl * 2798d9f12122SMartin Sperl * this also includes support for the depreciated 2799d9f12122SMartin Sperl * spi_message.is_dma_mapped interface 2800d9f12122SMartin Sperl */ 2801d9f12122SMartin Sperl 2802d9f12122SMartin Sperl /* the first transfer just needs the length modified, so we 2803d9f12122SMartin Sperl * run it outside the loop 2804d9f12122SMartin Sperl */ 2805c8dab77aSFabio Estevam xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2806d9f12122SMartin Sperl 2807d9f12122SMartin Sperl /* all the others need rx_buf/tx_buf also set */ 2808d9f12122SMartin Sperl for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2809d9f12122SMartin Sperl /* update rx_buf, tx_buf and dma */ 2810d9f12122SMartin Sperl if (xfers[i].rx_buf) 2811d9f12122SMartin Sperl xfers[i].rx_buf += offset; 2812d9f12122SMartin Sperl if (xfers[i].rx_dma) 2813d9f12122SMartin Sperl xfers[i].rx_dma += offset; 2814d9f12122SMartin Sperl if (xfers[i].tx_buf) 2815d9f12122SMartin Sperl xfers[i].tx_buf += offset; 2816d9f12122SMartin Sperl if (xfers[i].tx_dma) 2817d9f12122SMartin Sperl xfers[i].tx_dma += offset; 2818d9f12122SMartin Sperl 2819d9f12122SMartin Sperl /* update length */ 2820d9f12122SMartin Sperl xfers[i].len = min(maxsize, xfers[i].len - offset); 2821d9f12122SMartin Sperl } 2822d9f12122SMartin Sperl 2823d9f12122SMartin Sperl /* we set up xferp to the last entry we have inserted, 2824d9f12122SMartin Sperl * so that we skip those already split transfers 2825d9f12122SMartin Sperl */ 2826d9f12122SMartin Sperl *xferp = &xfers[count - 1]; 2827d9f12122SMartin Sperl 2828d9f12122SMartin Sperl /* increment statistics counters */ 28298caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 2830d9f12122SMartin Sperl transfers_split_maxsize); 2831d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2832d9f12122SMartin Sperl transfers_split_maxsize); 2833d9f12122SMartin Sperl 2834d9f12122SMartin Sperl return 0; 2835d9f12122SMartin Sperl } 2836d9f12122SMartin Sperl 2837d9f12122SMartin Sperl /** 2838d9f12122SMartin Sperl * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2839d9f12122SMartin Sperl * when an individual transfer exceeds a 2840d9f12122SMartin Sperl * 
certain size 28418caab75fSGeert Uytterhoeven * @ctlr: the @spi_controller for this transfer 28423700ce95SMasanari Iida * @msg: the @spi_message to transform 28433700ce95SMasanari Iida * @maxsize: the maximum when to apply this 284410f11a22SJavier Martinez Canillas * @gfp: GFP allocation flags 2845d9f12122SMartin Sperl * 2846d9f12122SMartin Sperl * Return: status of transformation 2847d9f12122SMartin Sperl */ 28488caab75fSGeert Uytterhoeven int spi_split_transfers_maxsize(struct spi_controller *ctlr, 2849d9f12122SMartin Sperl struct spi_message *msg, 2850d9f12122SMartin Sperl size_t maxsize, 2851d9f12122SMartin Sperl gfp_t gfp) 2852d9f12122SMartin Sperl { 2853d9f12122SMartin Sperl struct spi_transfer *xfer; 2854d9f12122SMartin Sperl int ret; 2855d9f12122SMartin Sperl 2856d9f12122SMartin Sperl /* iterate over the transfer_list, 2857d9f12122SMartin Sperl * but note that xfer is advanced to the last transfer inserted 2858d9f12122SMartin Sperl * to avoid checking sizes again unnecessarily (also xfer does 2859d9f12122SMartin Sperl * potentiall belong to a different list by the time the 2860d9f12122SMartin Sperl * replacement has happened 2861d9f12122SMartin Sperl */ 2862d9f12122SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2863d9f12122SMartin Sperl if (xfer->len > maxsize) { 28648caab75fSGeert Uytterhoeven ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 28658caab75fSGeert Uytterhoeven maxsize, gfp); 2866d9f12122SMartin Sperl if (ret) 2867d9f12122SMartin Sperl return ret; 2868d9f12122SMartin Sperl } 2869d9f12122SMartin Sperl } 2870d9f12122SMartin Sperl 2871d9f12122SMartin Sperl return 0; 2872d9f12122SMartin Sperl } 2873d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 28748ae12a0dSDavid Brownell 28758ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 28768ae12a0dSDavid Brownell 28778caab75fSGeert Uytterhoeven /* Core methods for SPI controller protocol drivers. Some of the 28787d077197SDavid Brownell * other core methods are currently defined as inline functions. 28797d077197SDavid Brownell */ 28807d077197SDavid Brownell 28818caab75fSGeert Uytterhoeven static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 28828caab75fSGeert Uytterhoeven u8 bits_per_word) 288363ab645fSStefan Brüns { 28848caab75fSGeert Uytterhoeven if (ctlr->bits_per_word_mask) { 288563ab645fSStefan Brüns /* Only 32 bits fit in the mask */ 288663ab645fSStefan Brüns if (bits_per_word > 32) 288763ab645fSStefan Brüns return -EINVAL; 28888caab75fSGeert Uytterhoeven if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 288963ab645fSStefan Brüns return -EINVAL; 289063ab645fSStefan Brüns } 289163ab645fSStefan Brüns 289263ab645fSStefan Brüns return 0; 289363ab645fSStefan Brüns } 289463ab645fSStefan Brüns 28957d077197SDavid Brownell /** 28967d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate 28977d077197SDavid Brownell * @spi: the device whose settings are being modified 28987d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device 28997d077197SDavid Brownell * 29007d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the 29017d077197SDavid Brownell * device doesn't work with its default. They may likewise need 29027d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function 29037d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep. 
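 *
 * A typical call sequence from a protocol driver, as an illustrative
 * sketch (the values are arbitrary examples):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 *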
29047d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 29057d077197SDavid Brownell * effect the next time the device is selected and data is transferred to 29067d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected. 29077d077197SDavid Brownell * 29087d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option 29097d077197SDavid Brownell * that the underlying controller or its driver does not support. For 29107d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words, 29117d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects. 291297d56dc6SJavier Martinez Canillas * 291397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 29147d077197SDavid Brownell */ 29157d077197SDavid Brownell int spi_setup(struct spi_device *spi) 29167d077197SDavid Brownell { 291783596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits; 29185ab8d262SAndy Shevchenko int status; 29197d077197SDavid Brownell 2920f477b7fbSwangyuhang /* check mode to prevent that DUAL and QUAD set at the same time 2921f477b7fbSwangyuhang */ 2922f477b7fbSwangyuhang if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2923f477b7fbSwangyuhang ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2924f477b7fbSwangyuhang dev_err(&spi->dev, 2925f477b7fbSwangyuhang "setup: can not select dual and quad at the same time\n"); 2926f477b7fbSwangyuhang return -EINVAL; 2927f477b7fbSwangyuhang } 2928f477b7fbSwangyuhang /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2929f477b7fbSwangyuhang */ 2930f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode & 29316b03061fSYogesh Narayan Gaur (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 29326b03061fSYogesh Narayan Gaur SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 2933f477b7fbSwangyuhang return -EINVAL; 2934e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options 29358caab75fSGeert Uytterhoeven * that aren't supported with their current controller 2936cbaa62e0SDavid Lechner * SPI_CS_WORD has a fallback software implementation, 2937cbaa62e0SDavid Lechner * so it is ignored here. 
2938e7db06b5SDavid Brownell */ 2939cbaa62e0SDavid Lechner bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD); 294083596fbeSGeert Uytterhoeven ugly_bits = bad_bits & 29416b03061fSYogesh Narayan Gaur (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 29426b03061fSYogesh Narayan Gaur SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 294383596fbeSGeert Uytterhoeven if (ugly_bits) { 294483596fbeSGeert Uytterhoeven dev_warn(&spi->dev, 294583596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n", 294683596fbeSGeert Uytterhoeven ugly_bits); 294783596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits; 294883596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits; 294983596fbeSGeert Uytterhoeven } 2950e7db06b5SDavid Brownell if (bad_bits) { 2951eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2952e7db06b5SDavid Brownell bad_bits); 2953e7db06b5SDavid Brownell return -EINVAL; 2954e7db06b5SDavid Brownell } 2955e7db06b5SDavid Brownell 29567d077197SDavid Brownell if (!spi->bits_per_word) 29577d077197SDavid Brownell spi->bits_per_word = 8; 29587d077197SDavid Brownell 29598caab75fSGeert Uytterhoeven status = __spi_validate_bits_per_word(spi->controller, 29608caab75fSGeert Uytterhoeven spi->bits_per_word); 29615ab8d262SAndy Shevchenko if (status) 29625ab8d262SAndy Shevchenko return status; 296363ab645fSStefan Brüns 2964052eb2d4SAxel Lin if (!spi->max_speed_hz) 29658caab75fSGeert Uytterhoeven spi->max_speed_hz = spi->controller->max_speed_hz; 2966052eb2d4SAxel Lin 29678caab75fSGeert Uytterhoeven if (spi->controller->setup) 29688caab75fSGeert Uytterhoeven status = spi->controller->setup(spi); 29697d077197SDavid Brownell 2970abeedb01SFranklin S Cooper Jr spi_set_cs(spi, false); 2971abeedb01SFranklin S Cooper Jr 29725fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 29737d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 29747d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 29757d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 29767d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 29777d077197SDavid Brownell (spi->mode & SPI_LOOP) ? "loopback, " : "", 29787d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 29797d077197SDavid Brownell status); 29807d077197SDavid Brownell 29817d077197SDavid Brownell return status; 29827d077197SDavid Brownell } 29837d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 29847d077197SDavid Brownell 298590808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2986cf32b71eSErnst Schwab { 29878caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 2988e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 29896ea31293SAtsushi Nemoto int w_size; 2990cf32b71eSErnst Schwab 299124a0013aSMark Brown if (list_empty(&message->transfers)) 299224a0013aSMark Brown return -EINVAL; 299324a0013aSMark Brown 2994cbaa62e0SDavid Lechner /* If an SPI controller does not support toggling the CS line on each 299571388b21SDavid Lechner * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 299671388b21SDavid Lechner * for the CS line, we can emulate the CS-per-word hardware function by 2997cbaa62e0SDavid Lechner * splitting transfers into one-word transfers and ensuring that 2998cbaa62e0SDavid Lechner * cs_change is set for each transfer. 
2999cbaa62e0SDavid Lechner */ 300071388b21SDavid Lechner if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3001f3186dd8SLinus Walleij spi->cs_gpiod || 300271388b21SDavid Lechner gpio_is_valid(spi->cs_gpio))) { 3003cbaa62e0SDavid Lechner size_t maxsize; 3004cbaa62e0SDavid Lechner int ret; 3005cbaa62e0SDavid Lechner 3006cbaa62e0SDavid Lechner maxsize = (spi->bits_per_word + 7) / 8; 3007cbaa62e0SDavid Lechner 3008cbaa62e0SDavid Lechner /* spi_split_transfers_maxsize() requires message->spi */ 3009cbaa62e0SDavid Lechner message->spi = spi; 3010cbaa62e0SDavid Lechner 3011cbaa62e0SDavid Lechner ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3012cbaa62e0SDavid Lechner GFP_KERNEL); 3013cbaa62e0SDavid Lechner if (ret) 3014cbaa62e0SDavid Lechner return ret; 3015cbaa62e0SDavid Lechner 3016cbaa62e0SDavid Lechner list_for_each_entry(xfer, &message->transfers, transfer_list) { 3017cbaa62e0SDavid Lechner /* don't change cs_change on the last entry in the list */ 3018cbaa62e0SDavid Lechner if (list_is_last(&xfer->transfer_list, &message->transfers)) 3019cbaa62e0SDavid Lechner break; 3020cbaa62e0SDavid Lechner xfer->cs_change = 1; 3021cbaa62e0SDavid Lechner } 3022cbaa62e0SDavid Lechner } 3023cbaa62e0SDavid Lechner 3024cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 3025cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 3026cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 3027cf32b71eSErnst Schwab * software limitations. 3028cf32b71eSErnst Schwab */ 30298caab75fSGeert Uytterhoeven if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 30308caab75fSGeert Uytterhoeven (spi->mode & SPI_3WIRE)) { 30318caab75fSGeert Uytterhoeven unsigned flags = ctlr->flags; 3032cf32b71eSErnst Schwab 3033cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 3034cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 3035cf32b71eSErnst Schwab return -EINVAL; 30368caab75fSGeert Uytterhoeven if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3037cf32b71eSErnst Schwab return -EINVAL; 30388caab75fSGeert Uytterhoeven if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3039cf32b71eSErnst Schwab return -EINVAL; 3040cf32b71eSErnst Schwab } 3041cf32b71eSErnst Schwab } 3042cf32b71eSErnst Schwab 3043e6811d1dSLaxman Dewangan /** 3044059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 3045059b8ffeSLaxman Dewangan * it is not set for this transfer. 3046f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 3047f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
3048e6811d1dSLaxman Dewangan */ 304977e80588SMartin Sperl message->frame_length = 0; 3050e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 3051078726ceSSourav Poddar message->frame_length += xfer->len; 3052e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 3053e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 3054a6f87fadSAxel Lin 3055a6f87fadSAxel Lin if (!xfer->speed_hz) 3056059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 30577dc9fbc3SMark Brown if (!xfer->speed_hz) 30588caab75fSGeert Uytterhoeven xfer->speed_hz = ctlr->max_speed_hz; 3059a6f87fadSAxel Lin 30608caab75fSGeert Uytterhoeven if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 30618caab75fSGeert Uytterhoeven xfer->speed_hz = ctlr->max_speed_hz; 306256ede94aSGabor Juhos 30638caab75fSGeert Uytterhoeven if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3064543bb255SStephen Warren return -EINVAL; 3065a2fd4f9fSMark Brown 30664d94bd21SIvan T. Ivanov /* 30674d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 30684d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 30694d94bd21SIvan T. Ivanov */ 30704d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 30714d94bd21SIvan T. Ivanov w_size = 1; 30724d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 30734d94bd21SIvan T. Ivanov w_size = 2; 30744d94bd21SIvan T. Ivanov else 30754d94bd21SIvan T. Ivanov w_size = 4; 30764d94bd21SIvan T. Ivanov 30774d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 30786ea31293SAtsushi Nemoto if (xfer->len % w_size) 30794d94bd21SIvan T. Ivanov return -EINVAL; 30804d94bd21SIvan T. Ivanov 30818caab75fSGeert Uytterhoeven if (xfer->speed_hz && ctlr->min_speed_hz && 30828caab75fSGeert Uytterhoeven xfer->speed_hz < ctlr->min_speed_hz) 3083a2fd4f9fSMark Brown return -EINVAL; 3084f477b7fbSwangyuhang 3085f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 3086f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 3087f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 3088f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 3089f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 30901afd9989SGeert Uytterhoeven * 1. check the value matches one of single, dual and quad 30911afd9989SGeert Uytterhoeven * 2. 
check tx/rx_nbits match the mode in spi_device 3092f477b7fbSwangyuhang */ 3093db90a441SSourav Poddar if (xfer->tx_buf) { 3094f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3095f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 3096f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 3097a2fd4f9fSMark Brown return -EINVAL; 3098f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3099f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3100f477b7fbSwangyuhang return -EINVAL; 3101f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3102f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 3103f477b7fbSwangyuhang return -EINVAL; 3104db90a441SSourav Poddar } 3105f477b7fbSwangyuhang /* check transfer rx_nbits */ 3106db90a441SSourav Poddar if (xfer->rx_buf) { 3107f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3108f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 3109f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 3110f477b7fbSwangyuhang return -EINVAL; 3111f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3112f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3113f477b7fbSwangyuhang return -EINVAL; 3114f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3115f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 3116f477b7fbSwangyuhang return -EINVAL; 3117e6811d1dSLaxman Dewangan } 3118e6811d1dSLaxman Dewangan } 3119e6811d1dSLaxman Dewangan 3120cf32b71eSErnst Schwab message->status = -EINPROGRESS; 312190808738SMark Brown 312290808738SMark Brown return 0; 312390808738SMark Brown } 312490808738SMark Brown 312590808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 312690808738SMark Brown { 31278caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 312890808738SMark Brown 3129b5932f5cSBoris Brezillon /* 3130b5932f5cSBoris Brezillon * Some controllers do not support doing regular SPI transfers. Return 3131b5932f5cSBoris Brezillon * ENOTSUPP when this is the case. 3132b5932f5cSBoris Brezillon */ 3133b5932f5cSBoris Brezillon if (!ctlr->transfer) 3134b5932f5cSBoris Brezillon return -ENOTSUPP; 3135b5932f5cSBoris Brezillon 313690808738SMark Brown message->spi = spi; 313790808738SMark Brown 31388caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3139eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3140eca2ebc7SMartin Sperl 314190808738SMark Brown trace_spi_message_submit(message); 314290808738SMark Brown 31438caab75fSGeert Uytterhoeven return ctlr->transfer(spi, message); 3144cf32b71eSErnst Schwab } 3145cf32b71eSErnst Schwab 3146568d0697SDavid Brownell /** 3147568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 3148568d0697SDavid Brownell * @spi: device with which data will be exchanged 3149568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 3150568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 3151568d0697SDavid Brownell * 3152568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 3153568d0697SDavid Brownell * as well as from task contexts which can sleep. 3154568d0697SDavid Brownell * 3155568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 3156568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 
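 *
 * As an illustrative sketch (the completion handler and context names are
 * hypothetical), a caller normally fills in the completion hook before
 * submitting:
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = foo_complete;
 *	msg.context = &foo_ctx;
 *	status = spi_async(spi, &msg);
 *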
3157568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 3158568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 3159568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 3160568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 3161568d0697SDavid Brownell * core or controller driver code. 3162568d0697SDavid Brownell * 3163568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 3164568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 3165568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 3166568d0697SDavid Brownell * time requirements, for example. 3167568d0697SDavid Brownell * 3168568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 3169568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 3170568d0697SDavid Brownell * Until returning from the associated message completion callback, 3171568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 3172568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 3173568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 317497d56dc6SJavier Martinez Canillas * 317597d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 3176568d0697SDavid Brownell */ 3177568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 3178568d0697SDavid Brownell { 31798caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3180cf32b71eSErnst Schwab int ret; 3181cf32b71eSErnst Schwab unsigned long flags; 3182568d0697SDavid Brownell 318390808738SMark Brown ret = __spi_validate(spi, message); 318490808738SMark Brown if (ret != 0) 318590808738SMark Brown return ret; 318690808738SMark Brown 31878caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3188568d0697SDavid Brownell 31898caab75fSGeert Uytterhoeven if (ctlr->bus_lock_flag) 3190cf32b71eSErnst Schwab ret = -EBUSY; 3191cf32b71eSErnst Schwab else 3192cf32b71eSErnst Schwab ret = __spi_async(spi, message); 3193568d0697SDavid Brownell 31948caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3195cf32b71eSErnst Schwab 3196cf32b71eSErnst Schwab return ret; 3197568d0697SDavid Brownell } 3198568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 3199568d0697SDavid Brownell 3200cf32b71eSErnst Schwab /** 3201cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 3202cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 3203cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 3204cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 3205cf32b71eSErnst Schwab * 3206cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 3207cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 3208cf32b71eSErnst Schwab * 3209cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 3210cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 
3211cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 3212cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 3213cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 3214cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 3215cf32b71eSErnst Schwab * core or controller driver code. 3216cf32b71eSErnst Schwab * 3217cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 3218cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 3219cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 3220cf32b71eSErnst Schwab * time requirements, for example. 3221cf32b71eSErnst Schwab * 3222cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 3223cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 3224cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 3225cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 3226cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 3227cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 322897d56dc6SJavier Martinez Canillas * 322997d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 3230cf32b71eSErnst Schwab */ 3231cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3232cf32b71eSErnst Schwab { 32338caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 3234cf32b71eSErnst Schwab int ret; 3235cf32b71eSErnst Schwab unsigned long flags; 3236cf32b71eSErnst Schwab 323790808738SMark Brown ret = __spi_validate(spi, message); 323890808738SMark Brown if (ret != 0) 323990808738SMark Brown return ret; 324090808738SMark Brown 32418caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3242cf32b71eSErnst Schwab 3243cf32b71eSErnst Schwab ret = __spi_async(spi, message); 3244cf32b71eSErnst Schwab 32458caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3246cf32b71eSErnst Schwab 3247cf32b71eSErnst Schwab return ret; 3248cf32b71eSErnst Schwab 3249cf32b71eSErnst Schwab } 3250cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 3251cf32b71eSErnst Schwab 32527d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 32537d077197SDavid Brownell 32548caab75fSGeert Uytterhoeven /* Utility methods for SPI protocol drivers, layered on 32557d077197SDavid Brownell * top of the core. Some other utility methods are defined as 32567d077197SDavid Brownell * inline functions. 
32577d077197SDavid Brownell */ 32587d077197SDavid Brownell 32595d870c8eSAndrew Morton static void spi_complete(void *arg) 32605d870c8eSAndrew Morton { 32615d870c8eSAndrew Morton complete(arg); 32625d870c8eSAndrew Morton } 32635d870c8eSAndrew Morton 3264ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3265cf32b71eSErnst Schwab { 3266cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 3267cf32b71eSErnst Schwab int status; 32688caab75fSGeert Uytterhoeven struct spi_controller *ctlr = spi->controller; 32690461a414SMark Brown unsigned long flags; 32700461a414SMark Brown 32710461a414SMark Brown status = __spi_validate(spi, message); 32720461a414SMark Brown if (status != 0) 32730461a414SMark Brown return status; 3274cf32b71eSErnst Schwab 3275cf32b71eSErnst Schwab message->complete = spi_complete; 3276cf32b71eSErnst Schwab message->context = &done; 32770461a414SMark Brown message->spi = spi; 3278cf32b71eSErnst Schwab 32798caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3280eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3281eca2ebc7SMartin Sperl 32820461a414SMark Brown /* If we're not using the legacy transfer method then we will 32830461a414SMark Brown * try to transfer in the calling context so special case. 32840461a414SMark Brown * This code would be less tricky if we could remove the 32850461a414SMark Brown * support for driver implemented message queues. 32860461a414SMark Brown */ 32878caab75fSGeert Uytterhoeven if (ctlr->transfer == spi_queued_transfer) { 32888caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 32890461a414SMark Brown 32900461a414SMark Brown trace_spi_message_submit(message); 32910461a414SMark Brown 32920461a414SMark Brown status = __spi_queued_transfer(spi, message, false); 32930461a414SMark Brown 32948caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 32950461a414SMark Brown } else { 3296cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 32970461a414SMark Brown } 3298cf32b71eSErnst Schwab 3299cf32b71eSErnst Schwab if (status == 0) { 33000461a414SMark Brown /* Push out the messages in the calling context if we 33010461a414SMark Brown * can. 33020461a414SMark Brown */ 33038caab75fSGeert Uytterhoeven if (ctlr->transfer == spi_queued_transfer) { 33048caab75fSGeert Uytterhoeven SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3305eca2ebc7SMartin Sperl spi_sync_immediate); 3306eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3307eca2ebc7SMartin Sperl spi_sync_immediate); 33088caab75fSGeert Uytterhoeven __spi_pump_messages(ctlr, false); 3309eca2ebc7SMartin Sperl } 33100461a414SMark Brown 3311cf32b71eSErnst Schwab wait_for_completion(&done); 3312cf32b71eSErnst Schwab status = message->status; 3313cf32b71eSErnst Schwab } 3314cf32b71eSErnst Schwab message->context = NULL; 3315cf32b71eSErnst Schwab return status; 3316cf32b71eSErnst Schwab } 3317cf32b71eSErnst Schwab 33188ae12a0dSDavid Brownell /** 33198ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 33208ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 33218ae12a0dSDavid Brownell * @message: describes the data transfers 332233e34dc6SDavid Brownell * Context: can sleep 33238ae12a0dSDavid Brownell * 33248ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 33258ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. 
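 *
 * A minimal synchronous read, as a sketch (buffer and length names are
 * placeholders):
 *
 *	struct spi_transfer t = {
 *		.rx_buf = rxbuf,
 *		.len = len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);
 *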
Low-overhead controller 33268ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 33278ae12a0dSDavid Brownell * 33288ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 33298ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 33308ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 33318ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 33328ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 33338ae12a0dSDavid Brownell * 33340c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 33350c868461SDavid Brownell * message will not be freed before this call returns. 33360c868461SDavid Brownell * 333797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 33388ae12a0dSDavid Brownell */ 33398ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 33408ae12a0dSDavid Brownell { 3341ef4d96ecSMark Brown int ret; 3342ef4d96ecSMark Brown 33438caab75fSGeert Uytterhoeven mutex_lock(&spi->controller->bus_lock_mutex); 3344ef4d96ecSMark Brown ret = __spi_sync(spi, message); 33458caab75fSGeert Uytterhoeven mutex_unlock(&spi->controller->bus_lock_mutex); 3346ef4d96ecSMark Brown 3347ef4d96ecSMark Brown return ret; 33488ae12a0dSDavid Brownell } 33498ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 33508ae12a0dSDavid Brownell 3351cf32b71eSErnst Schwab /** 3352cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 3353cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 3354cf32b71eSErnst Schwab * @message: describes the data transfers 3355cf32b71eSErnst Schwab * Context: can sleep 3356cf32b71eSErnst Schwab * 3357cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3358cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 3359cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 3360cf32b71eSErnst Schwab * 3361cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 336225985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 3363cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 3364cf32b71eSErnst Schwab * 336597d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 3366cf32b71eSErnst Schwab */ 3367cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 3368cf32b71eSErnst Schwab { 3369ef4d96ecSMark Brown return __spi_sync(spi, message); 3370cf32b71eSErnst Schwab } 3371cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 3372cf32b71eSErnst Schwab 3373cf32b71eSErnst Schwab /** 3374cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 33758caab75fSGeert Uytterhoeven * @ctlr: SPI bus master that should be locked for exclusive bus access 3376cf32b71eSErnst Schwab * Context: can sleep 3377cf32b71eSErnst Schwab * 3378cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3379cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 
3380cf32b71eSErnst Schwab * 3381cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 3382cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 3383cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 3384cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 3385cf32b71eSErnst Schwab * 338697d56dc6SJavier Martinez Canillas * Return: always zero. 3387cf32b71eSErnst Schwab */ 33888caab75fSGeert Uytterhoeven int spi_bus_lock(struct spi_controller *ctlr) 3389cf32b71eSErnst Schwab { 3390cf32b71eSErnst Schwab unsigned long flags; 3391cf32b71eSErnst Schwab 33928caab75fSGeert Uytterhoeven mutex_lock(&ctlr->bus_lock_mutex); 3393cf32b71eSErnst Schwab 33948caab75fSGeert Uytterhoeven spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 33958caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 1; 33968caab75fSGeert Uytterhoeven spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3397cf32b71eSErnst Schwab 3398cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 3399cf32b71eSErnst Schwab 3400cf32b71eSErnst Schwab return 0; 3401cf32b71eSErnst Schwab } 3402cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 3403cf32b71eSErnst Schwab 3404cf32b71eSErnst Schwab /** 3405cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 34068caab75fSGeert Uytterhoeven * @ctlr: SPI bus master that was locked for exclusive bus access 3407cf32b71eSErnst Schwab * Context: can sleep 3408cf32b71eSErnst Schwab * 3409cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 3410cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 3411cf32b71eSErnst Schwab * 3412cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3413cf32b71eSErnst Schwab * call. 3414cf32b71eSErnst Schwab * 341597d56dc6SJavier Martinez Canillas * Return: always zero. 3416cf32b71eSErnst Schwab */ 34178caab75fSGeert Uytterhoeven int spi_bus_unlock(struct spi_controller *ctlr) 3418cf32b71eSErnst Schwab { 34198caab75fSGeert Uytterhoeven ctlr->bus_lock_flag = 0; 3420cf32b71eSErnst Schwab 34218caab75fSGeert Uytterhoeven mutex_unlock(&ctlr->bus_lock_mutex); 3422cf32b71eSErnst Schwab 3423cf32b71eSErnst Schwab return 0; 3424cf32b71eSErnst Schwab } 3425cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 3426cf32b71eSErnst Schwab 3427a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 3428a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 34298ae12a0dSDavid Brownell 34308ae12a0dSDavid Brownell static u8 *buf; 34318ae12a0dSDavid Brownell 34328ae12a0dSDavid Brownell /** 34338ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 34348ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 34358ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 34368ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 343727570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 343827570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 343933e34dc6SDavid Brownell * Context: can sleep 34408ae12a0dSDavid Brownell * 34418ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 34428ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. 
The return value 34438ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 3444b885244eSDavid Brownell * This call may only be used from a context that may sleep. 34458ae12a0dSDavid Brownell * 34460c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 344733e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 344833e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 34490c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 345097d56dc6SJavier Martinez Canillas * 345197d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 34528ae12a0dSDavid Brownell */ 34538ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 34540c4a1590SMark Brown const void *txbuf, unsigned n_tx, 34550c4a1590SMark Brown void *rxbuf, unsigned n_rx) 34568ae12a0dSDavid Brownell { 3457068f4070SDavid Brownell static DEFINE_MUTEX(lock); 34588ae12a0dSDavid Brownell 34598ae12a0dSDavid Brownell int status; 34608ae12a0dSDavid Brownell struct spi_message message; 3461bdff549eSDavid Brownell struct spi_transfer x[2]; 34628ae12a0dSDavid Brownell u8 *local_buf; 34638ae12a0dSDavid Brownell 3464b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 3465b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 3466b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 3467b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 34688ae12a0dSDavid Brownell */ 3469b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 34702cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 34712cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 3472b3a223eeSMark Brown if (!local_buf) 3473b3a223eeSMark Brown return -ENOMEM; 3474b3a223eeSMark Brown } else { 3475b3a223eeSMark Brown local_buf = buf; 3476b3a223eeSMark Brown } 34778ae12a0dSDavid Brownell 34788275c642SVitaly Wool spi_message_init(&message); 34795fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 3480bdff549eSDavid Brownell if (n_tx) { 3481bdff549eSDavid Brownell x[0].len = n_tx; 3482bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 3483bdff549eSDavid Brownell } 3484bdff549eSDavid Brownell if (n_rx) { 3485bdff549eSDavid Brownell x[1].len = n_rx; 3486bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 3487bdff549eSDavid Brownell } 34888275c642SVitaly Wool 34898ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 3490bdff549eSDavid Brownell x[0].tx_buf = local_buf; 3491bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 34928ae12a0dSDavid Brownell 34938ae12a0dSDavid Brownell /* do the i/o */ 34948ae12a0dSDavid Brownell status = spi_sync(spi, &message); 34959b938b74SMarc Pignat if (status == 0) 3496bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 34978ae12a0dSDavid Brownell 3498bdff549eSDavid Brownell if (x[0].tx_buf == buf) 3499068f4070SDavid Brownell mutex_unlock(&lock); 35008ae12a0dSDavid Brownell else 35018ae12a0dSDavid Brownell kfree(local_buf); 35028ae12a0dSDavid Brownell 35038ae12a0dSDavid Brownell return status; 35048ae12a0dSDavid Brownell } 35058ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 35068ae12a0dSDavid Brownell 35078ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 35088ae12a0dSDavid Brownell 35095f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF) 
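/*
 * Illustrative sketch, not part of this file: of_find_spi_device_by_node()
 * below returns its result with an extra device reference held (taken by
 * bus_find_device()), so a hypothetical caller is expected to balance it:
 *
 *	struct spi_device *spi = of_find_spi_device_by_node(np);
 *
 *	if (spi) {
 *		... use the device ...
 *		put_device(&spi->dev);
 *	}
 */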
3510ce79d54aSPantelis Antoniou static int __spi_of_device_match(struct device *dev, void *data) 3511ce79d54aSPantelis Antoniou { 3512ce79d54aSPantelis Antoniou return dev->of_node == data; 3513ce79d54aSPantelis Antoniou } 3514ce79d54aSPantelis Antoniou 3515ce79d54aSPantelis Antoniou /* must call put_device() when done with returned spi_device device */ 35165f143af7SMarco Felsch struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3517ce79d54aSPantelis Antoniou { 3518ce79d54aSPantelis Antoniou struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3519ce79d54aSPantelis Antoniou __spi_of_device_match); 3520ce79d54aSPantelis Antoniou return dev ? to_spi_device(dev) : NULL; 3521ce79d54aSPantelis Antoniou } 35225f143af7SMarco Felsch EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); 35235f143af7SMarco Felsch #endif /* IS_ENABLED(CONFIG_OF) */ 3524ce79d54aSPantelis Antoniou 35255f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF_DYNAMIC) 35268caab75fSGeert Uytterhoeven static int __spi_of_controller_match(struct device *dev, const void *data) 3527ce79d54aSPantelis Antoniou { 3528ce79d54aSPantelis Antoniou return dev->of_node == data; 3529ce79d54aSPantelis Antoniou } 3530ce79d54aSPantelis Antoniou 35318caab75fSGeert Uytterhoeven /* the spi controllers are not using spi_bus, so we find it with another way */ 35328caab75fSGeert Uytterhoeven static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 3533ce79d54aSPantelis Antoniou { 3534ce79d54aSPantelis Antoniou struct device *dev; 3535ce79d54aSPantelis Antoniou 3536ce79d54aSPantelis Antoniou dev = class_find_device(&spi_master_class, NULL, node, 35378caab75fSGeert Uytterhoeven __spi_of_controller_match); 35386c364062SGeert Uytterhoeven if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 35396c364062SGeert Uytterhoeven dev = class_find_device(&spi_slave_class, NULL, node, 35408caab75fSGeert Uytterhoeven __spi_of_controller_match); 3541ce79d54aSPantelis Antoniou if (!dev) 3542ce79d54aSPantelis Antoniou return NULL; 3543ce79d54aSPantelis Antoniou 3544ce79d54aSPantelis Antoniou /* reference got in class_find_device */ 35458caab75fSGeert Uytterhoeven return container_of(dev, struct spi_controller, dev); 3546ce79d54aSPantelis Antoniou } 3547ce79d54aSPantelis Antoniou 3548ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3549ce79d54aSPantelis Antoniou void *arg) 3550ce79d54aSPantelis Antoniou { 3551ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 35528caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 3553ce79d54aSPantelis Antoniou struct spi_device *spi; 3554ce79d54aSPantelis Antoniou 3555ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 3556ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 35578caab75fSGeert Uytterhoeven ctlr = of_find_spi_controller_by_node(rd->dn->parent); 35588caab75fSGeert Uytterhoeven if (ctlr == NULL) 3559ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 3560ce79d54aSPantelis Antoniou 3561bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 35628caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 3563bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3564bd6c1644SGeert Uytterhoeven } 3565bd6c1644SGeert Uytterhoeven 35668caab75fSGeert Uytterhoeven spi = of_register_spi_device(ctlr, rd->dn); 35678caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 3568ce79d54aSPantelis Antoniou 3569ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 357025c56c88SRob Herring 
pr_err("%s: failed to create for '%pOF'\n", 357125c56c88SRob Herring __func__, rd->dn); 3572e0af98a7SRalf Ramsauer of_node_clear_flag(rd->dn, OF_POPULATED); 3573ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 3574ce79d54aSPantelis Antoniou } 3575ce79d54aSPantelis Antoniou break; 3576ce79d54aSPantelis Antoniou 3577ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 3578bd6c1644SGeert Uytterhoeven /* already depopulated? */ 3579bd6c1644SGeert Uytterhoeven if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3580bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3581bd6c1644SGeert Uytterhoeven 3582ce79d54aSPantelis Antoniou /* find our device by node */ 3583ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 3584ce79d54aSPantelis Antoniou if (spi == NULL) 3585ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? not meant for us */ 3586ce79d54aSPantelis Antoniou 3587ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 3588ce79d54aSPantelis Antoniou spi_unregister_device(spi); 3589ce79d54aSPantelis Antoniou 3590ce79d54aSPantelis Antoniou /* and put the reference of the find */ 3591ce79d54aSPantelis Antoniou put_device(&spi->dev); 3592ce79d54aSPantelis Antoniou break; 3593ce79d54aSPantelis Antoniou } 3594ce79d54aSPantelis Antoniou 3595ce79d54aSPantelis Antoniou return NOTIFY_OK; 3596ce79d54aSPantelis Antoniou } 3597ce79d54aSPantelis Antoniou 3598ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 3599ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 3600ce79d54aSPantelis Antoniou }; 3601ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3602ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 3603ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3604ce79d54aSPantelis Antoniou 36057f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI) 36068caab75fSGeert Uytterhoeven static int spi_acpi_controller_match(struct device *dev, const void *data) 36077f24467fSOctavian Purdila { 36087f24467fSOctavian Purdila return ACPI_COMPANION(dev->parent) == data; 36097f24467fSOctavian Purdila } 36107f24467fSOctavian Purdila 36117f24467fSOctavian Purdila static int spi_acpi_device_match(struct device *dev, void *data) 36127f24467fSOctavian Purdila { 36137f24467fSOctavian Purdila return ACPI_COMPANION(dev) == data; 36147f24467fSOctavian Purdila } 36157f24467fSOctavian Purdila 36168caab75fSGeert Uytterhoeven static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 36177f24467fSOctavian Purdila { 36187f24467fSOctavian Purdila struct device *dev; 36197f24467fSOctavian Purdila 36207f24467fSOctavian Purdila dev = class_find_device(&spi_master_class, NULL, adev, 36218caab75fSGeert Uytterhoeven spi_acpi_controller_match); 36226c364062SGeert Uytterhoeven if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 36236c364062SGeert Uytterhoeven dev = class_find_device(&spi_slave_class, NULL, adev, 36248caab75fSGeert Uytterhoeven spi_acpi_controller_match); 36257f24467fSOctavian Purdila if (!dev) 36267f24467fSOctavian Purdila return NULL; 36277f24467fSOctavian Purdila 36288caab75fSGeert Uytterhoeven return container_of(dev, struct spi_controller, dev); 36297f24467fSOctavian Purdila } 36307f24467fSOctavian Purdila 36317f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 36327f24467fSOctavian Purdila { 36337f24467fSOctavian Purdila struct device *dev; 36347f24467fSOctavian Purdila 36357f24467fSOctavian Purdila dev = 
bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 36367f24467fSOctavian Purdila 36377f24467fSOctavian Purdila return dev ? to_spi_device(dev) : NULL; 36387f24467fSOctavian Purdila } 36397f24467fSOctavian Purdila 36407f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 36417f24467fSOctavian Purdila void *arg) 36427f24467fSOctavian Purdila { 36437f24467fSOctavian Purdila struct acpi_device *adev = arg; 36448caab75fSGeert Uytterhoeven struct spi_controller *ctlr; 36457f24467fSOctavian Purdila struct spi_device *spi; 36467f24467fSOctavian Purdila 36477f24467fSOctavian Purdila switch (value) { 36487f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_ADD: 36498caab75fSGeert Uytterhoeven ctlr = acpi_spi_find_controller_by_adev(adev->parent); 36508caab75fSGeert Uytterhoeven if (!ctlr) 36517f24467fSOctavian Purdila break; 36527f24467fSOctavian Purdila 36538caab75fSGeert Uytterhoeven acpi_register_spi_device(ctlr, adev); 36548caab75fSGeert Uytterhoeven put_device(&ctlr->dev); 36557f24467fSOctavian Purdila break; 36567f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_REMOVE: 36577f24467fSOctavian Purdila if (!acpi_device_enumerated(adev)) 36587f24467fSOctavian Purdila break; 36597f24467fSOctavian Purdila 36607f24467fSOctavian Purdila spi = acpi_spi_find_device_by_adev(adev); 36617f24467fSOctavian Purdila if (!spi) 36627f24467fSOctavian Purdila break; 36637f24467fSOctavian Purdila 36647f24467fSOctavian Purdila spi_unregister_device(spi); 36657f24467fSOctavian Purdila put_device(&spi->dev); 36667f24467fSOctavian Purdila break; 36677f24467fSOctavian Purdila } 36687f24467fSOctavian Purdila 36697f24467fSOctavian Purdila return NOTIFY_OK; 36707f24467fSOctavian Purdila } 36717f24467fSOctavian Purdila 36727f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = { 36737f24467fSOctavian Purdila .notifier_call = acpi_spi_notify, 36747f24467fSOctavian Purdila }; 36757f24467fSOctavian Purdila #else 36767f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier; 36777f24467fSOctavian Purdila #endif 36787f24467fSOctavian Purdila 36798ae12a0dSDavid Brownell static int __init spi_init(void) 36808ae12a0dSDavid Brownell { 3681b885244eSDavid Brownell int status; 36828ae12a0dSDavid Brownell 3683e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3684b885244eSDavid Brownell if (!buf) { 3685b885244eSDavid Brownell status = -ENOMEM; 3686b885244eSDavid Brownell goto err0; 36878ae12a0dSDavid Brownell } 3688b885244eSDavid Brownell 3689b885244eSDavid Brownell status = bus_register(&spi_bus_type); 3690b885244eSDavid Brownell if (status < 0) 3691b885244eSDavid Brownell goto err1; 3692b885244eSDavid Brownell 3693b885244eSDavid Brownell status = class_register(&spi_master_class); 3694b885244eSDavid Brownell if (status < 0) 3695b885244eSDavid Brownell goto err2; 3696ce79d54aSPantelis Antoniou 36976c364062SGeert Uytterhoeven if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 36986c364062SGeert Uytterhoeven status = class_register(&spi_slave_class); 36996c364062SGeert Uytterhoeven if (status < 0) 37006c364062SGeert Uytterhoeven goto err3; 37016c364062SGeert Uytterhoeven } 37026c364062SGeert Uytterhoeven 37035267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3704ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 37057f24467fSOctavian Purdila if (IS_ENABLED(CONFIG_ACPI)) 37067f24467fSOctavian Purdila WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3707ce79d54aSPantelis Antoniou 
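	/*
	 * The reconfiguration notifiers above are registered best-effort:
	 * a failure is reported via WARN_ON() but does not prevent the
	 * SPI core from initializing.
	 */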
3708b885244eSDavid Brownell return 0; 3709b885244eSDavid Brownell 37106c364062SGeert Uytterhoeven err3: 37116c364062SGeert Uytterhoeven class_unregister(&spi_master_class); 3712b885244eSDavid Brownell err2: 3713b885244eSDavid Brownell bus_unregister(&spi_bus_type); 3714b885244eSDavid Brownell err1: 3715b885244eSDavid Brownell kfree(buf); 3716b885244eSDavid Brownell buf = NULL; 3717b885244eSDavid Brownell err0: 3718b885244eSDavid Brownell return status; 3719b885244eSDavid Brownell } 3720b885244eSDavid Brownell 37218ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 37228ae12a0dSDavid Brownell * but even essential drivers wait till later 3723b885244eSDavid Brownell * 3724b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 3725b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 3726b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 37278ae12a0dSDavid Brownell */ 3728673c0c00SDavid Brownell postcore_initcall(spi_init); 3729*f0125f1aSMark Brown 3730
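/*
 * The examples below are illustrative sketches added to this annotated
 * listing; they are not part of the mainline driver.  Function names,
 * register values and buffer sizes are hypothetical.
 */

/*
 * Typical use of the spi_write_then_read() convenience helper documented
 * above: read one 8-bit register of a hypothetical device.  The buffers
 * need not be DMA-safe because the helper copies through its own bounce
 * buffer.
 */
static int __maybe_unused spi_example_read_reg(struct spi_device *spi,
					       u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical "read" bit */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}

/*
 * Typical use of spi_bus_lock()/spi_sync_locked()/spi_bus_unlock() as
 * documented above: keep two separate messages back to back on the bus
 * so that no message for another device on the same controller can be
 * queued in between.  Note that, as documented for spi_sync(), chip
 * select is still normally deasserted between the two messages; the bus
 * lock only prevents other devices' traffic from interleaving.  The
 * caller provides DMA-safe buffers.
 */
static int __maybe_unused spi_example_locked_write_then_read(struct spi_device *spi,
							     const void *cmd, size_t cmd_len,
							     void *resp, size_t resp_len)
{
	struct spi_transfer tx = { .tx_buf = cmd, .len = cmd_len };
	struct spi_transfer rx = { .rx_buf = resp, .len = resp_len };
	struct spi_message wr, rd;
	int ret;

	spi_message_init_with_transfers(&wr, &tx, 1);
	spi_message_init_with_transfers(&rd, &rx, 1);

	spi_bus_lock(spi->controller);

	ret = spi_sync_locked(spi, &wr);
	if (!ret)
		ret = spi_sync_locked(spi, &rd);

	spi_bus_unlock(spi->controller);

	return ret;
}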