18ae12a0dSDavid Brownell /* 2ca632f55SGrant Likely * SPI init/core code 38ae12a0dSDavid Brownell * 48ae12a0dSDavid Brownell * Copyright (C) 2005 David Brownell 5d57a4282SGrant Likely * Copyright (C) 2008 Secret Lab Technologies Ltd. 68ae12a0dSDavid Brownell * 78ae12a0dSDavid Brownell * This program is free software; you can redistribute it and/or modify 88ae12a0dSDavid Brownell * it under the terms of the GNU General Public License as published by 98ae12a0dSDavid Brownell * the Free Software Foundation; either version 2 of the License, or 108ae12a0dSDavid Brownell * (at your option) any later version. 118ae12a0dSDavid Brownell * 128ae12a0dSDavid Brownell * This program is distributed in the hope that it will be useful, 138ae12a0dSDavid Brownell * but WITHOUT ANY WARRANTY; without even the implied warranty of 148ae12a0dSDavid Brownell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 158ae12a0dSDavid Brownell * GNU General Public License for more details. 168ae12a0dSDavid Brownell */ 178ae12a0dSDavid Brownell 188ae12a0dSDavid Brownell #include <linux/kernel.h> 198ae12a0dSDavid Brownell #include <linux/device.h> 208ae12a0dSDavid Brownell #include <linux/init.h> 218ae12a0dSDavid Brownell #include <linux/cache.h> 2299adef31SMark Brown #include <linux/dma-mapping.h> 2399adef31SMark Brown #include <linux/dmaengine.h> 2494040828SMatthias Kaehlcke #include <linux/mutex.h> 252b7a32f7SSinan Akman #include <linux/of_device.h> 26d57a4282SGrant Likely #include <linux/of_irq.h> 2786be408bSSylwester Nawrocki #include <linux/clk/clk-conf.h> 285a0e3ad6STejun Heo #include <linux/slab.h> 29e0626e38SAnton Vorontsov #include <linux/mod_devicetable.h> 308ae12a0dSDavid Brownell #include <linux/spi/spi.h> 3174317984SJean-Christophe PLAGNIOL-VILLARD #include <linux/of_gpio.h> 323ae22e8cSMark Brown #include <linux/pm_runtime.h> 33f48c767cSUlf Hansson #include <linux/pm_domain.h> 34025ed130SPaul Gortmaker #include <linux/export.h> 358bd75c77SClark Williams #include <linux/sched/rt.h> 36ffbbdd21SLinus Walleij #include <linux/delay.h> 37ffbbdd21SLinus Walleij #include <linux/kthread.h> 3864bee4d2SMika Westerberg #include <linux/ioport.h> 3964bee4d2SMika Westerberg #include <linux/acpi.h> 408ae12a0dSDavid Brownell 4156ec1978SMark Brown #define CREATE_TRACE_POINTS 4256ec1978SMark Brown #include <trace/events/spi.h> 4356ec1978SMark Brown 448ae12a0dSDavid Brownell static void spidev_release(struct device *dev) 458ae12a0dSDavid Brownell { 460ffa0285SHans-Peter Nilsson struct spi_device *spi = to_spi_device(dev); 478ae12a0dSDavid Brownell 488ae12a0dSDavid Brownell /* spi masters may cleanup for released devices */ 498ae12a0dSDavid Brownell if (spi->master->cleanup) 508ae12a0dSDavid Brownell spi->master->cleanup(spi); 518ae12a0dSDavid Brownell 520c868461SDavid Brownell spi_master_put(spi->master); 5307a389feSRoman Tereshonkov kfree(spi); 548ae12a0dSDavid Brownell } 558ae12a0dSDavid Brownell 568ae12a0dSDavid Brownell static ssize_t 578ae12a0dSDavid Brownell modalias_show(struct device *dev, struct device_attribute *a, char *buf) 588ae12a0dSDavid Brownell { 598ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 608c4ff6d0SZhang Rui int len; 618c4ff6d0SZhang Rui 628c4ff6d0SZhang Rui len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 638c4ff6d0SZhang Rui if (len != -ENODEV) 648c4ff6d0SZhang Rui return len; 658ae12a0dSDavid Brownell 66d8e328b3SGrant Likely return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 678ae12a0dSDavid Brownell } 68aa7da564SGreg Kroah-Hartman static 
DEVICE_ATTR_RO(modalias); 698ae12a0dSDavid Brownell 70eca2ebc7SMartin Sperl #define SPI_STATISTICS_ATTRS(field, file) \ 71eca2ebc7SMartin Sperl static ssize_t spi_master_##field##_show(struct device *dev, \ 72eca2ebc7SMartin Sperl struct device_attribute *attr, \ 73eca2ebc7SMartin Sperl char *buf) \ 74eca2ebc7SMartin Sperl { \ 75eca2ebc7SMartin Sperl struct spi_master *master = container_of(dev, \ 76eca2ebc7SMartin Sperl struct spi_master, dev); \ 77eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&master->statistics, buf); \ 78eca2ebc7SMartin Sperl } \ 79eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_master_##field = { \ 80eca2ebc7SMartin Sperl .attr = { .name = file, .mode = S_IRUGO }, \ 81eca2ebc7SMartin Sperl .show = spi_master_##field##_show, \ 82eca2ebc7SMartin Sperl }; \ 83eca2ebc7SMartin Sperl static ssize_t spi_device_##field##_show(struct device *dev, \ 84eca2ebc7SMartin Sperl struct device_attribute *attr, \ 85eca2ebc7SMartin Sperl char *buf) \ 86eca2ebc7SMartin Sperl { \ 87eca2ebc7SMartin Sperl struct spi_device *spi = container_of(dev, \ 88eca2ebc7SMartin Sperl struct spi_device, dev); \ 89eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&spi->statistics, buf); \ 90eca2ebc7SMartin Sperl } \ 91eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_device_##field = { \ 92eca2ebc7SMartin Sperl .attr = { .name = file, .mode = S_IRUGO }, \ 93eca2ebc7SMartin Sperl .show = spi_device_##field##_show, \ 94eca2ebc7SMartin Sperl } 95eca2ebc7SMartin Sperl 96eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 97eca2ebc7SMartin Sperl static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 98eca2ebc7SMartin Sperl char *buf) \ 99eca2ebc7SMartin Sperl { \ 100eca2ebc7SMartin Sperl unsigned long flags; \ 101eca2ebc7SMartin Sperl ssize_t len; \ 102eca2ebc7SMartin Sperl spin_lock_irqsave(&stat->lock, flags); \ 103eca2ebc7SMartin Sperl len = sprintf(buf, format_string, stat->field); \ 104eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stat->lock, flags); \ 105eca2ebc7SMartin Sperl return len; \ 106eca2ebc7SMartin Sperl } \ 107eca2ebc7SMartin Sperl SPI_STATISTICS_ATTRS(name, file) 108eca2ebc7SMartin Sperl 109eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW(field, format_string) \ 110eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 111eca2ebc7SMartin Sperl field, format_string) 112eca2ebc7SMartin Sperl 113eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(messages, "%lu"); 114eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(transfers, "%lu"); 115eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(errors, "%lu"); 116eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(timedout, "%lu"); 117eca2ebc7SMartin Sperl 118eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync, "%lu"); 119eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 120eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_async, "%lu"); 121eca2ebc7SMartin Sperl 122eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes, "%llu"); 123eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 124eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 125eca2ebc7SMartin Sperl 1266b7bc061SMartin Sperl #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 1276b7bc061SMartin Sperl SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 1286b7bc061SMartin Sperl "transfer_bytes_histo_" number, \ 1296b7bc061SMartin Sperl transfer_bytes_histo[index], "%lu") 1306b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 
1316b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 1326b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 1336b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 1346b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 1356b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 1366b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 1376b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 1386b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 1396b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 1406b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 1416b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 1426b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 1436b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 1446b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 1456b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 1466b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 1476b7bc061SMartin Sperl 148aa7da564SGreg Kroah-Hartman static struct attribute *spi_dev_attrs[] = { 149aa7da564SGreg Kroah-Hartman &dev_attr_modalias.attr, 150aa7da564SGreg Kroah-Hartman NULL, 1518ae12a0dSDavid Brownell }; 152eca2ebc7SMartin Sperl 153eca2ebc7SMartin Sperl static const struct attribute_group spi_dev_group = { 154eca2ebc7SMartin Sperl .attrs = spi_dev_attrs, 155eca2ebc7SMartin Sperl }; 156eca2ebc7SMartin Sperl 157eca2ebc7SMartin Sperl static struct attribute *spi_device_statistics_attrs[] = { 158eca2ebc7SMartin Sperl &dev_attr_spi_device_messages.attr, 159eca2ebc7SMartin Sperl &dev_attr_spi_device_transfers.attr, 160eca2ebc7SMartin Sperl &dev_attr_spi_device_errors.attr, 161eca2ebc7SMartin Sperl &dev_attr_spi_device_timedout.attr, 162eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync.attr, 163eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync_immediate.attr, 164eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_async.attr, 165eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes.attr, 166eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_rx.attr, 167eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_tx.attr, 1686b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo0.attr, 1696b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo1.attr, 1706b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo2.attr, 1716b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo3.attr, 1726b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo4.attr, 1736b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo5.attr, 1746b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo6.attr, 1756b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo7.attr, 1766b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo8.attr, 1776b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo9.attr, 1786b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo10.attr, 1796b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo11.attr, 1806b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo12.attr, 1816b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo13.attr, 1826b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo14.attr, 1836b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo15.attr, 
1846b7bc061SMartin Sperl &dev_attr_spi_device_transfer_bytes_histo16.attr, 185eca2ebc7SMartin Sperl NULL, 186eca2ebc7SMartin Sperl }; 187eca2ebc7SMartin Sperl 188eca2ebc7SMartin Sperl static const struct attribute_group spi_device_statistics_group = { 189eca2ebc7SMartin Sperl .name = "statistics", 190eca2ebc7SMartin Sperl .attrs = spi_device_statistics_attrs, 191eca2ebc7SMartin Sperl }; 192eca2ebc7SMartin Sperl 193eca2ebc7SMartin Sperl static const struct attribute_group *spi_dev_groups[] = { 194eca2ebc7SMartin Sperl &spi_dev_group, 195eca2ebc7SMartin Sperl &spi_device_statistics_group, 196eca2ebc7SMartin Sperl NULL, 197eca2ebc7SMartin Sperl }; 198eca2ebc7SMartin Sperl 199eca2ebc7SMartin Sperl static struct attribute *spi_master_statistics_attrs[] = { 200eca2ebc7SMartin Sperl &dev_attr_spi_master_messages.attr, 201eca2ebc7SMartin Sperl &dev_attr_spi_master_transfers.attr, 202eca2ebc7SMartin Sperl &dev_attr_spi_master_errors.attr, 203eca2ebc7SMartin Sperl &dev_attr_spi_master_timedout.attr, 204eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_sync.attr, 205eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_sync_immediate.attr, 206eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_async.attr, 207eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes.attr, 208eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes_rx.attr, 209eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes_tx.attr, 2106b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo0.attr, 2116b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo1.attr, 2126b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo2.attr, 2136b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo3.attr, 2146b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo4.attr, 2156b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo5.attr, 2166b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo6.attr, 2176b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo7.attr, 2186b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo8.attr, 2196b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo9.attr, 2206b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo10.attr, 2216b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo11.attr, 2226b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo12.attr, 2236b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo13.attr, 2246b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo14.attr, 2256b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo15.attr, 2266b7bc061SMartin Sperl &dev_attr_spi_master_transfer_bytes_histo16.attr, 227eca2ebc7SMartin Sperl NULL, 228eca2ebc7SMartin Sperl }; 229eca2ebc7SMartin Sperl 230eca2ebc7SMartin Sperl static const struct attribute_group spi_master_statistics_group = { 231eca2ebc7SMartin Sperl .name = "statistics", 232eca2ebc7SMartin Sperl .attrs = spi_master_statistics_attrs, 233eca2ebc7SMartin Sperl }; 234eca2ebc7SMartin Sperl 235eca2ebc7SMartin Sperl static const struct attribute_group *spi_master_groups[] = { 236eca2ebc7SMartin Sperl &spi_master_statistics_group, 237eca2ebc7SMartin Sperl NULL, 238eca2ebc7SMartin Sperl }; 239eca2ebc7SMartin Sperl 240eca2ebc7SMartin Sperl void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 241eca2ebc7SMartin Sperl struct spi_transfer *xfer, 242eca2ebc7SMartin Sperl struct spi_master *master) 243eca2ebc7SMartin Sperl { 244eca2ebc7SMartin Sperl unsigned long flags; 2456b7bc061SMartin Sperl int l2len = min(fls(xfer->len), 
SPI_STATISTICS_HISTO_SIZE) - 1; 2466b7bc061SMartin Sperl 2476b7bc061SMartin Sperl if (l2len < 0) 2486b7bc061SMartin Sperl l2len = 0; 249eca2ebc7SMartin Sperl 250eca2ebc7SMartin Sperl spin_lock_irqsave(&stats->lock, flags); 251eca2ebc7SMartin Sperl 252eca2ebc7SMartin Sperl stats->transfers++; 2536b7bc061SMartin Sperl stats->transfer_bytes_histo[l2len]++; 254eca2ebc7SMartin Sperl 255eca2ebc7SMartin Sperl stats->bytes += xfer->len; 256eca2ebc7SMartin Sperl if ((xfer->tx_buf) && 257eca2ebc7SMartin Sperl (xfer->tx_buf != master->dummy_tx)) 258eca2ebc7SMartin Sperl stats->bytes_tx += xfer->len; 259eca2ebc7SMartin Sperl if ((xfer->rx_buf) && 260eca2ebc7SMartin Sperl (xfer->rx_buf != master->dummy_rx)) 261eca2ebc7SMartin Sperl stats->bytes_rx += xfer->len; 262eca2ebc7SMartin Sperl 263eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stats->lock, flags); 264eca2ebc7SMartin Sperl } 265eca2ebc7SMartin Sperl EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 2668ae12a0dSDavid Brownell 2678ae12a0dSDavid Brownell /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 2688ae12a0dSDavid Brownell * and the sysfs version makes coldplug work too. 2698ae12a0dSDavid Brownell */ 2708ae12a0dSDavid Brownell 27175368bf6SAnton Vorontsov static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 27275368bf6SAnton Vorontsov const struct spi_device *sdev) 27375368bf6SAnton Vorontsov { 27475368bf6SAnton Vorontsov while (id->name[0]) { 27575368bf6SAnton Vorontsov if (!strcmp(sdev->modalias, id->name)) 27675368bf6SAnton Vorontsov return id; 27775368bf6SAnton Vorontsov id++; 27875368bf6SAnton Vorontsov } 27975368bf6SAnton Vorontsov return NULL; 28075368bf6SAnton Vorontsov } 28175368bf6SAnton Vorontsov 28275368bf6SAnton Vorontsov const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 28375368bf6SAnton Vorontsov { 28475368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 28575368bf6SAnton Vorontsov 28675368bf6SAnton Vorontsov return spi_match_id(sdrv->id_table, sdev); 28775368bf6SAnton Vorontsov } 28875368bf6SAnton Vorontsov EXPORT_SYMBOL_GPL(spi_get_device_id); 28975368bf6SAnton Vorontsov 2908ae12a0dSDavid Brownell static int spi_match_device(struct device *dev, struct device_driver *drv) 2918ae12a0dSDavid Brownell { 2928ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 29375368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(drv); 29475368bf6SAnton Vorontsov 2952b7a32f7SSinan Akman /* Attempt an OF style match */ 2962b7a32f7SSinan Akman if (of_driver_match_device(dev, drv)) 2972b7a32f7SSinan Akman return 1; 2982b7a32f7SSinan Akman 29964bee4d2SMika Westerberg /* Then try ACPI */ 30064bee4d2SMika Westerberg if (acpi_driver_match_device(dev, drv)) 30164bee4d2SMika Westerberg return 1; 30264bee4d2SMika Westerberg 30375368bf6SAnton Vorontsov if (sdrv->id_table) 30475368bf6SAnton Vorontsov return !!spi_match_id(sdrv->id_table, spi); 3058ae12a0dSDavid Brownell 30635f74fcaSKay Sievers return strcmp(spi->modalias, drv->name) == 0; 3078ae12a0dSDavid Brownell } 3088ae12a0dSDavid Brownell 3097eff2e7aSKay Sievers static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 3108ae12a0dSDavid Brownell { 3118ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 3128c4ff6d0SZhang Rui int rc; 3138c4ff6d0SZhang Rui 3148c4ff6d0SZhang Rui rc = acpi_device_uevent_modalias(dev, env); 3158c4ff6d0SZhang Rui if (rc != -ENODEV) 3168c4ff6d0SZhang Rui return rc; 3178ae12a0dSDavid Brownell 
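/* For example, a device whose modalias is "ads7846" ends up with a
 * "MODALIAS=spi:ads7846" uevent here (SPI_MODULE_PREFIX is "spi:"),
 * which is what "modprobe $MODALIAS" style hotplug matches against the
 * aliases generated from MODULE_DEVICE_TABLE(spi, ...) entries.
 */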
318e0626e38SAnton Vorontsov add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 3198ae12a0dSDavid Brownell return 0; 3208ae12a0dSDavid Brownell } 3218ae12a0dSDavid Brownell 3228ae12a0dSDavid Brownell struct bus_type spi_bus_type = { 3238ae12a0dSDavid Brownell .name = "spi", 324aa7da564SGreg Kroah-Hartman .dev_groups = spi_dev_groups, 3258ae12a0dSDavid Brownell .match = spi_match_device, 3268ae12a0dSDavid Brownell .uevent = spi_uevent, 3278ae12a0dSDavid Brownell }; 3288ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_bus_type); 3298ae12a0dSDavid Brownell 330b885244eSDavid Brownell 331b885244eSDavid Brownell static int spi_drv_probe(struct device *dev) 332b885244eSDavid Brownell { 333b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 33444af7927SJon Hunter struct spi_device *spi = to_spi_device(dev); 33533cf00e5SMika Westerberg int ret; 336b885244eSDavid Brownell 33786be408bSSylwester Nawrocki ret = of_clk_set_defaults(dev->of_node, false); 33886be408bSSylwester Nawrocki if (ret) 33986be408bSSylwester Nawrocki return ret; 34086be408bSSylwester Nawrocki 34144af7927SJon Hunter if (dev->of_node) { 34244af7927SJon Hunter spi->irq = of_irq_get(dev->of_node, 0); 34344af7927SJon Hunter if (spi->irq == -EPROBE_DEFER) 34444af7927SJon Hunter return -EPROBE_DEFER; 34544af7927SJon Hunter if (spi->irq < 0) 34644af7927SJon Hunter spi->irq = 0; 34744af7927SJon Hunter } 34844af7927SJon Hunter 349676e7c25SUlf Hansson ret = dev_pm_domain_attach(dev, true); 350676e7c25SUlf Hansson if (ret != -EPROBE_DEFER) { 35144af7927SJon Hunter ret = sdrv->probe(spi); 35233cf00e5SMika Westerberg if (ret) 353676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 354676e7c25SUlf Hansson } 35533cf00e5SMika Westerberg 35633cf00e5SMika Westerberg return ret; 357b885244eSDavid Brownell } 358b885244eSDavid Brownell 359b885244eSDavid Brownell static int spi_drv_remove(struct device *dev) 360b885244eSDavid Brownell { 361b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 36233cf00e5SMika Westerberg int ret; 363b885244eSDavid Brownell 364aec35f4eSJean Delvare ret = sdrv->remove(to_spi_device(dev)); 365676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 36633cf00e5SMika Westerberg 36733cf00e5SMika Westerberg return ret; 368b885244eSDavid Brownell } 369b885244eSDavid Brownell 370b885244eSDavid Brownell static void spi_drv_shutdown(struct device *dev) 371b885244eSDavid Brownell { 372b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 373b885244eSDavid Brownell 374b885244eSDavid Brownell sdrv->shutdown(to_spi_device(dev)); 375b885244eSDavid Brownell } 376b885244eSDavid Brownell 37733e34dc6SDavid Brownell /** 378ca5d2485SAndrew F. Davis * __spi_register_driver - register a SPI driver 37933e34dc6SDavid Brownell * @sdrv: the driver to register 38033e34dc6SDavid Brownell * Context: can sleep 38197d56dc6SJavier Martinez Canillas * 38297d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 38333e34dc6SDavid Brownell */ 384ca5d2485SAndrew F. Davis int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 385b885244eSDavid Brownell { 386ca5d2485SAndrew F. 
Davis sdrv->driver.owner = owner;
387b885244eSDavid Brownell sdrv->driver.bus = &spi_bus_type;
388b885244eSDavid Brownell if (sdrv->probe)
389b885244eSDavid Brownell sdrv->driver.probe = spi_drv_probe;
390b885244eSDavid Brownell if (sdrv->remove)
391b885244eSDavid Brownell sdrv->driver.remove = spi_drv_remove;
392b885244eSDavid Brownell if (sdrv->shutdown)
393b885244eSDavid Brownell sdrv->driver.shutdown = spi_drv_shutdown;
394b885244eSDavid Brownell return driver_register(&sdrv->driver);
395b885244eSDavid Brownell }
396ca5d2485SAndrew F. Davis EXPORT_SYMBOL_GPL(__spi_register_driver);
397b885244eSDavid Brownell
3988ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
3998ae12a0dSDavid Brownell
4008ae12a0dSDavid Brownell /* SPI devices should normally not be created by SPI device drivers; that
4018ae12a0dSDavid Brownell * would make them board-specific. Similarly with SPI master drivers.
4028ae12a0dSDavid Brownell * Device registration normally goes into a file like arch/.../mach.../board-YYY.c
4038ae12a0dSDavid Brownell * with other readonly (flashable) information about mainboard devices.
4048ae12a0dSDavid Brownell */
4058ae12a0dSDavid Brownell
4068ae12a0dSDavid Brownell struct boardinfo {
4078ae12a0dSDavid Brownell struct list_head list;
4082b9603a0SFeng Tang struct spi_board_info board_info;
4098ae12a0dSDavid Brownell };
4108ae12a0dSDavid Brownell
4118ae12a0dSDavid Brownell static LIST_HEAD(board_list);
4122b9603a0SFeng Tang static LIST_HEAD(spi_master_list);
4132b9603a0SFeng Tang
4142b9603a0SFeng Tang /*
4152b9603a0SFeng Tang * Used to protect add/del operations for the board_info list and
4162b9603a0SFeng Tang * the spi_master list, and their matching process.
4172b9603a0SFeng Tang */
41894040828SMatthias Kaehlcke static DEFINE_MUTEX(board_lock);
4198ae12a0dSDavid Brownell
420dc87c98eSGrant Likely /**
421dc87c98eSGrant Likely * spi_alloc_device - Allocate a new SPI device
422dc87c98eSGrant Likely * @master: Controller to which device is connected
423dc87c98eSGrant Likely * Context: can sleep
424dc87c98eSGrant Likely *
425dc87c98eSGrant Likely * Allows a driver to allocate and initialize a spi_device without
426dc87c98eSGrant Likely * registering it immediately. This allows a driver to directly
427dc87c98eSGrant Likely * fill the spi_device with device parameters before calling
428dc87c98eSGrant Likely * spi_add_device() on it.
429dc87c98eSGrant Likely *
430dc87c98eSGrant Likely * The caller is responsible for calling spi_add_device() on the returned
431dc87c98eSGrant Likely * spi_device structure to add it to the SPI master. If the caller
432dc87c98eSGrant Likely * needs to discard the spi_device without adding it, then it should
433dc87c98eSGrant Likely * call spi_dev_put() on it.
434dc87c98eSGrant Likely *
43597d56dc6SJavier Martinez Canillas * Return: a pointer to the new device, or NULL.
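 *
 * A minimal usage sketch (illustrative only; the "my_chip" modalias and the
 * chosen settings are not taken from this file):
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "my_chip", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);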
436dc87c98eSGrant Likely */ 437dc87c98eSGrant Likely struct spi_device *spi_alloc_device(struct spi_master *master) 438dc87c98eSGrant Likely { 439dc87c98eSGrant Likely struct spi_device *spi; 440dc87c98eSGrant Likely 441dc87c98eSGrant Likely if (!spi_master_get(master)) 442dc87c98eSGrant Likely return NULL; 443dc87c98eSGrant Likely 4445fe5f05eSJingoo Han spi = kzalloc(sizeof(*spi), GFP_KERNEL); 445dc87c98eSGrant Likely if (!spi) { 446dc87c98eSGrant Likely spi_master_put(master); 447dc87c98eSGrant Likely return NULL; 448dc87c98eSGrant Likely } 449dc87c98eSGrant Likely 450dc87c98eSGrant Likely spi->master = master; 451178db7d3SLaurent Pinchart spi->dev.parent = &master->dev; 452dc87c98eSGrant Likely spi->dev.bus = &spi_bus_type; 453dc87c98eSGrant Likely spi->dev.release = spidev_release; 454446411e1SAndreas Larsson spi->cs_gpio = -ENOENT; 455eca2ebc7SMartin Sperl 456eca2ebc7SMartin Sperl spin_lock_init(&spi->statistics.lock); 457eca2ebc7SMartin Sperl 458dc87c98eSGrant Likely device_initialize(&spi->dev); 459dc87c98eSGrant Likely return spi; 460dc87c98eSGrant Likely } 461dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_alloc_device); 462dc87c98eSGrant Likely 463e13ac47bSJarkko Nikula static void spi_dev_set_name(struct spi_device *spi) 464e13ac47bSJarkko Nikula { 465e13ac47bSJarkko Nikula struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 466e13ac47bSJarkko Nikula 467e13ac47bSJarkko Nikula if (adev) { 468e13ac47bSJarkko Nikula dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 469e13ac47bSJarkko Nikula return; 470e13ac47bSJarkko Nikula } 471e13ac47bSJarkko Nikula 472e13ac47bSJarkko Nikula dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 473e13ac47bSJarkko Nikula spi->chip_select); 474e13ac47bSJarkko Nikula } 475e13ac47bSJarkko Nikula 476b6fb8d3aSMika Westerberg static int spi_dev_check(struct device *dev, void *data) 477b6fb8d3aSMika Westerberg { 478b6fb8d3aSMika Westerberg struct spi_device *spi = to_spi_device(dev); 479b6fb8d3aSMika Westerberg struct spi_device *new_spi = data; 480b6fb8d3aSMika Westerberg 481b6fb8d3aSMika Westerberg if (spi->master == new_spi->master && 482b6fb8d3aSMika Westerberg spi->chip_select == new_spi->chip_select) 483b6fb8d3aSMika Westerberg return -EBUSY; 484b6fb8d3aSMika Westerberg return 0; 485b6fb8d3aSMika Westerberg } 486b6fb8d3aSMika Westerberg 487dc87c98eSGrant Likely /** 488dc87c98eSGrant Likely * spi_add_device - Add spi_device allocated with spi_alloc_device 489dc87c98eSGrant Likely * @spi: spi_device to register 490dc87c98eSGrant Likely * 491dc87c98eSGrant Likely * Companion function to spi_alloc_device. Devices allocated with 492dc87c98eSGrant Likely * spi_alloc_device can be added onto the spi bus with this function. 493dc87c98eSGrant Likely * 49497d56dc6SJavier Martinez Canillas * Return: 0 on success; negative errno on failure 495dc87c98eSGrant Likely */ 496dc87c98eSGrant Likely int spi_add_device(struct spi_device *spi) 497dc87c98eSGrant Likely { 498e48880e0SDavid Brownell static DEFINE_MUTEX(spi_add_lock); 49974317984SJean-Christophe PLAGNIOL-VILLARD struct spi_master *master = spi->master; 50074317984SJean-Christophe PLAGNIOL-VILLARD struct device *dev = master->dev.parent; 501dc87c98eSGrant Likely int status; 502dc87c98eSGrant Likely 503dc87c98eSGrant Likely /* Chipselects are numbered 0..max; validate. 
*/ 50474317984SJean-Christophe PLAGNIOL-VILLARD if (spi->chip_select >= master->num_chipselect) { 505dc87c98eSGrant Likely dev_err(dev, "cs%d >= max %d\n", 506dc87c98eSGrant Likely spi->chip_select, 50774317984SJean-Christophe PLAGNIOL-VILLARD master->num_chipselect); 508dc87c98eSGrant Likely return -EINVAL; 509dc87c98eSGrant Likely } 510dc87c98eSGrant Likely 511dc87c98eSGrant Likely /* Set the bus ID string */ 512e13ac47bSJarkko Nikula spi_dev_set_name(spi); 513e48880e0SDavid Brownell 514e48880e0SDavid Brownell /* We need to make sure there's no other device with this 515e48880e0SDavid Brownell * chipselect **BEFORE** we call setup(), else we'll trash 516e48880e0SDavid Brownell * its configuration. Lock against concurrent add() calls. 517e48880e0SDavid Brownell */ 518e48880e0SDavid Brownell mutex_lock(&spi_add_lock); 519e48880e0SDavid Brownell 520b6fb8d3aSMika Westerberg status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 521b6fb8d3aSMika Westerberg if (status) { 522e48880e0SDavid Brownell dev_err(dev, "chipselect %d already in use\n", 523e48880e0SDavid Brownell spi->chip_select); 524e48880e0SDavid Brownell goto done; 525e48880e0SDavid Brownell } 526e48880e0SDavid Brownell 52774317984SJean-Christophe PLAGNIOL-VILLARD if (master->cs_gpios) 52874317984SJean-Christophe PLAGNIOL-VILLARD spi->cs_gpio = master->cs_gpios[spi->chip_select]; 52974317984SJean-Christophe PLAGNIOL-VILLARD 530e48880e0SDavid Brownell /* Drivers may modify this initial i/o setup, but will 531e48880e0SDavid Brownell * normally rely on the device being setup. Devices 532e48880e0SDavid Brownell * using SPI_CS_HIGH can't coexist well otherwise... 533e48880e0SDavid Brownell */ 5347d077197SDavid Brownell status = spi_setup(spi); 535dc87c98eSGrant Likely if (status < 0) { 536eb288a1fSLinus Walleij dev_err(dev, "can't setup %s, status %d\n", 537eb288a1fSLinus Walleij dev_name(&spi->dev), status); 538e48880e0SDavid Brownell goto done; 539dc87c98eSGrant Likely } 540dc87c98eSGrant Likely 541e48880e0SDavid Brownell /* Device may be bound to an active driver when this returns */ 542dc87c98eSGrant Likely status = device_add(&spi->dev); 543e48880e0SDavid Brownell if (status < 0) 544eb288a1fSLinus Walleij dev_err(dev, "can't add %s, status %d\n", 545eb288a1fSLinus Walleij dev_name(&spi->dev), status); 546e48880e0SDavid Brownell else 54735f74fcaSKay Sievers dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 548e48880e0SDavid Brownell 549e48880e0SDavid Brownell done: 550e48880e0SDavid Brownell mutex_unlock(&spi_add_lock); 551e48880e0SDavid Brownell return status; 552dc87c98eSGrant Likely } 553dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_add_device); 5548ae12a0dSDavid Brownell 55533e34dc6SDavid Brownell /** 55633e34dc6SDavid Brownell * spi_new_device - instantiate one new SPI device 55733e34dc6SDavid Brownell * @master: Controller to which device is connected 55833e34dc6SDavid Brownell * @chip: Describes the SPI device 55933e34dc6SDavid Brownell * Context: can sleep 56033e34dc6SDavid Brownell * 56133e34dc6SDavid Brownell * On typical mainboards, this is purely internal; and it's not needed 5628ae12a0dSDavid Brownell * after board init creates the hard-wired devices. Some development 5638ae12a0dSDavid Brownell * platforms may not be able to use spi_register_board_info though, and 5648ae12a0dSDavid Brownell * this is exported so that for example a USB or parport based adapter 5658ae12a0dSDavid Brownell * driver could add devices (which it would learn about out-of-band). 
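 *
 * A sketch of that out-of-band style of use (the "ads7846" chip, clock rate
 * and chip select below are only illustrative, not defined by this file):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "ads7846",
 *		.max_speed_hz	= 1500000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *child;
 *
 *	child = spi_new_device(master, &chip);
 *	if (!child)
 *		dev_err(&master->dev, "cannot add ads7846 child\n");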
566082c8cb4SDavid Brownell * 56797d56dc6SJavier Martinez Canillas * Return: the new device, or NULL. 5688ae12a0dSDavid Brownell */ 569e9d5a461SAdrian Bunk struct spi_device *spi_new_device(struct spi_master *master, 570e9d5a461SAdrian Bunk struct spi_board_info *chip) 5718ae12a0dSDavid Brownell { 5728ae12a0dSDavid Brownell struct spi_device *proxy; 5738ae12a0dSDavid Brownell int status; 5748ae12a0dSDavid Brownell 575082c8cb4SDavid Brownell /* NOTE: caller did any chip->bus_num checks necessary. 576082c8cb4SDavid Brownell * 577082c8cb4SDavid Brownell * Also, unless we change the return value convention to use 578082c8cb4SDavid Brownell * error-or-pointer (not NULL-or-pointer), troubleshootability 579082c8cb4SDavid Brownell * suggests syslogged diagnostics are best here (ugh). 580082c8cb4SDavid Brownell */ 581082c8cb4SDavid Brownell 582dc87c98eSGrant Likely proxy = spi_alloc_device(master); 583dc87c98eSGrant Likely if (!proxy) 5848ae12a0dSDavid Brownell return NULL; 5858ae12a0dSDavid Brownell 586102eb975SGrant Likely WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 587102eb975SGrant Likely 5888ae12a0dSDavid Brownell proxy->chip_select = chip->chip_select; 5898ae12a0dSDavid Brownell proxy->max_speed_hz = chip->max_speed_hz; 590980a01c9SDavid Brownell proxy->mode = chip->mode; 5918ae12a0dSDavid Brownell proxy->irq = chip->irq; 592102eb975SGrant Likely strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 5938ae12a0dSDavid Brownell proxy->dev.platform_data = (void *) chip->platform_data; 5948ae12a0dSDavid Brownell proxy->controller_data = chip->controller_data; 5958ae12a0dSDavid Brownell proxy->controller_state = NULL; 5968ae12a0dSDavid Brownell 597dc87c98eSGrant Likely status = spi_add_device(proxy); 5988ae12a0dSDavid Brownell if (status < 0) { 599dc87c98eSGrant Likely spi_dev_put(proxy); 6008ae12a0dSDavid Brownell return NULL; 6018ae12a0dSDavid Brownell } 602dc87c98eSGrant Likely 603dc87c98eSGrant Likely return proxy; 604dc87c98eSGrant Likely } 6058ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_new_device); 6068ae12a0dSDavid Brownell 607*3b1884c2SGeert Uytterhoeven /** 608*3b1884c2SGeert Uytterhoeven * spi_unregister_device - unregister a single SPI device 609*3b1884c2SGeert Uytterhoeven * @spi: spi_device to unregister 610*3b1884c2SGeert Uytterhoeven * 611*3b1884c2SGeert Uytterhoeven * Start making the passed SPI device vanish. Normally this would be handled 612*3b1884c2SGeert Uytterhoeven * by spi_unregister_master(). 
613*3b1884c2SGeert Uytterhoeven */ 614*3b1884c2SGeert Uytterhoeven void spi_unregister_device(struct spi_device *spi) 615*3b1884c2SGeert Uytterhoeven { 616*3b1884c2SGeert Uytterhoeven if (spi) 617*3b1884c2SGeert Uytterhoeven device_unregister(&spi->dev); 618*3b1884c2SGeert Uytterhoeven } 619*3b1884c2SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_device); 620*3b1884c2SGeert Uytterhoeven 6212b9603a0SFeng Tang static void spi_match_master_to_boardinfo(struct spi_master *master, 6222b9603a0SFeng Tang struct spi_board_info *bi) 6232b9603a0SFeng Tang { 6242b9603a0SFeng Tang struct spi_device *dev; 6252b9603a0SFeng Tang 6262b9603a0SFeng Tang if (master->bus_num != bi->bus_num) 6272b9603a0SFeng Tang return; 6282b9603a0SFeng Tang 6292b9603a0SFeng Tang dev = spi_new_device(master, bi); 6302b9603a0SFeng Tang if (!dev) 6312b9603a0SFeng Tang dev_err(master->dev.parent, "can't create new device for %s\n", 6322b9603a0SFeng Tang bi->modalias); 6332b9603a0SFeng Tang } 6342b9603a0SFeng Tang 63533e34dc6SDavid Brownell /** 63633e34dc6SDavid Brownell * spi_register_board_info - register SPI devices for a given board 63733e34dc6SDavid Brownell * @info: array of chip descriptors 63833e34dc6SDavid Brownell * @n: how many descriptors are provided 63933e34dc6SDavid Brownell * Context: can sleep 64033e34dc6SDavid Brownell * 6418ae12a0dSDavid Brownell * Board-specific early init code calls this (probably during arch_initcall) 6428ae12a0dSDavid Brownell * with segments of the SPI device table. Any device nodes are created later, 6438ae12a0dSDavid Brownell * after the relevant parent SPI controller (bus_num) is defined. We keep 6448ae12a0dSDavid Brownell * this table of devices forever, so that reloading a controller driver will 6458ae12a0dSDavid Brownell * not make Linux forget about these hard-wired devices. 6468ae12a0dSDavid Brownell * 6478ae12a0dSDavid Brownell * Other code can also call this, e.g. a particular add-on board might provide 6488ae12a0dSDavid Brownell * SPI devices through its expansion connector, so code initializing that board 6498ae12a0dSDavid Brownell * would naturally declare its SPI devices. 6508ae12a0dSDavid Brownell * 6518ae12a0dSDavid Brownell * The board info passed can safely be __initdata ... but be careful of 6528ae12a0dSDavid Brownell * any embedded pointers (platform_data, etc), they're copied as-is. 65397d56dc6SJavier Martinez Canillas * 65497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
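 *
 * A board file sketch (the "m25p80" flash chip, bus number and speed are
 * illustrative values, not taken from this file):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));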
6558ae12a0dSDavid Brownell */ 656fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n) 6578ae12a0dSDavid Brownell { 6588ae12a0dSDavid Brownell struct boardinfo *bi; 6592b9603a0SFeng Tang int i; 6608ae12a0dSDavid Brownell 661c7908a37SXiubo Li if (!n) 662c7908a37SXiubo Li return -EINVAL; 663c7908a37SXiubo Li 6642b9603a0SFeng Tang bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 6658ae12a0dSDavid Brownell if (!bi) 6668ae12a0dSDavid Brownell return -ENOMEM; 6678ae12a0dSDavid Brownell 6682b9603a0SFeng Tang for (i = 0; i < n; i++, bi++, info++) { 6692b9603a0SFeng Tang struct spi_master *master; 6702b9603a0SFeng Tang 6712b9603a0SFeng Tang memcpy(&bi->board_info, info, sizeof(*info)); 67294040828SMatthias Kaehlcke mutex_lock(&board_lock); 6738ae12a0dSDavid Brownell list_add_tail(&bi->list, &board_list); 6742b9603a0SFeng Tang list_for_each_entry(master, &spi_master_list, list) 6752b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 67694040828SMatthias Kaehlcke mutex_unlock(&board_lock); 6772b9603a0SFeng Tang } 6782b9603a0SFeng Tang 6798ae12a0dSDavid Brownell return 0; 6808ae12a0dSDavid Brownell } 6818ae12a0dSDavid Brownell 6828ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 6838ae12a0dSDavid Brownell 684b158935fSMark Brown static void spi_set_cs(struct spi_device *spi, bool enable) 685b158935fSMark Brown { 686b158935fSMark Brown if (spi->mode & SPI_CS_HIGH) 687b158935fSMark Brown enable = !enable; 688b158935fSMark Brown 689243f07beSAndy Shevchenko if (gpio_is_valid(spi->cs_gpio)) 690b158935fSMark Brown gpio_set_value(spi->cs_gpio, !enable); 691b158935fSMark Brown else if (spi->master->set_cs) 692b158935fSMark Brown spi->master->set_cs(spi, !enable); 693b158935fSMark Brown } 694b158935fSMark Brown 6952de440f5SGeert Uytterhoeven #ifdef CONFIG_HAS_DMA 6966ad45a27SMark Brown static int spi_map_buf(struct spi_master *master, struct device *dev, 6976ad45a27SMark Brown struct sg_table *sgt, void *buf, size_t len, 6986ad45a27SMark Brown enum dma_data_direction dir) 6996ad45a27SMark Brown { 7006ad45a27SMark Brown const bool vmalloced_buf = is_vmalloc_addr(buf); 70165598c13SAndrew Gabbasov int desc_len; 70265598c13SAndrew Gabbasov int sgs; 7036ad45a27SMark Brown struct page *vm_page; 7046ad45a27SMark Brown void *sg_buf; 7056ad45a27SMark Brown size_t min; 7066ad45a27SMark Brown int i, ret; 7076ad45a27SMark Brown 70865598c13SAndrew Gabbasov if (vmalloced_buf) { 70965598c13SAndrew Gabbasov desc_len = PAGE_SIZE; 71065598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 71165598c13SAndrew Gabbasov } else { 71265598c13SAndrew Gabbasov desc_len = master->max_dma_len; 71365598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len, desc_len); 71465598c13SAndrew Gabbasov } 71565598c13SAndrew Gabbasov 7166ad45a27SMark Brown ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 7176ad45a27SMark Brown if (ret != 0) 7186ad45a27SMark Brown return ret; 7196ad45a27SMark Brown 7206ad45a27SMark Brown for (i = 0; i < sgs; i++) { 7216ad45a27SMark Brown 7226ad45a27SMark Brown if (vmalloced_buf) { 72365598c13SAndrew Gabbasov min = min_t(size_t, 72465598c13SAndrew Gabbasov len, desc_len - offset_in_page(buf)); 7256ad45a27SMark Brown vm_page = vmalloc_to_page(buf); 7266ad45a27SMark Brown if (!vm_page) { 7276ad45a27SMark Brown sg_free_table(sgt); 7286ad45a27SMark Brown return -ENOMEM; 7296ad45a27SMark Brown } 730c1aefbddSCharles Keepax sg_set_page(&sgt->sgl[i], vm_page, 731c1aefbddSCharles Keepax min, 
offset_in_page(buf)); 7326ad45a27SMark Brown } else { 73365598c13SAndrew Gabbasov min = min_t(size_t, len, desc_len); 7346ad45a27SMark Brown sg_buf = buf; 735c1aefbddSCharles Keepax sg_set_buf(&sgt->sgl[i], sg_buf, min); 7366ad45a27SMark Brown } 7376ad45a27SMark Brown 7386ad45a27SMark Brown 7396ad45a27SMark Brown buf += min; 7406ad45a27SMark Brown len -= min; 7416ad45a27SMark Brown } 7426ad45a27SMark Brown 7436ad45a27SMark Brown ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 74489e4b66aSGeert Uytterhoeven if (!ret) 74589e4b66aSGeert Uytterhoeven ret = -ENOMEM; 7466ad45a27SMark Brown if (ret < 0) { 7476ad45a27SMark Brown sg_free_table(sgt); 7486ad45a27SMark Brown return ret; 7496ad45a27SMark Brown } 7506ad45a27SMark Brown 7516ad45a27SMark Brown sgt->nents = ret; 7526ad45a27SMark Brown 7536ad45a27SMark Brown return 0; 7546ad45a27SMark Brown } 7556ad45a27SMark Brown 7566ad45a27SMark Brown static void spi_unmap_buf(struct spi_master *master, struct device *dev, 7576ad45a27SMark Brown struct sg_table *sgt, enum dma_data_direction dir) 7586ad45a27SMark Brown { 7596ad45a27SMark Brown if (sgt->orig_nents) { 7606ad45a27SMark Brown dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 7616ad45a27SMark Brown sg_free_table(sgt); 7626ad45a27SMark Brown } 7636ad45a27SMark Brown } 7646ad45a27SMark Brown 7652de440f5SGeert Uytterhoeven static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 76699adef31SMark Brown { 76799adef31SMark Brown struct device *tx_dev, *rx_dev; 76899adef31SMark Brown struct spi_transfer *xfer; 7696ad45a27SMark Brown int ret; 7703a2eba9bSMark Brown 7716ad45a27SMark Brown if (!master->can_dma) 77299adef31SMark Brown return 0; 77399adef31SMark Brown 774c37f45b5SLeilk Liu if (master->dma_tx) 7753fc25421SGeert Uytterhoeven tx_dev = master->dma_tx->device->dev; 776c37f45b5SLeilk Liu else 777c37f45b5SLeilk Liu tx_dev = &master->dev; 778c37f45b5SLeilk Liu 779c37f45b5SLeilk Liu if (master->dma_rx) 7803fc25421SGeert Uytterhoeven rx_dev = master->dma_rx->device->dev; 781c37f45b5SLeilk Liu else 782c37f45b5SLeilk Liu rx_dev = &master->dev; 78399adef31SMark Brown 78499adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 78599adef31SMark Brown if (!master->can_dma(master, msg->spi, xfer)) 78699adef31SMark Brown continue; 78799adef31SMark Brown 78899adef31SMark Brown if (xfer->tx_buf != NULL) { 7896ad45a27SMark Brown ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 7906ad45a27SMark Brown (void *)xfer->tx_buf, xfer->len, 79199adef31SMark Brown DMA_TO_DEVICE); 7926ad45a27SMark Brown if (ret != 0) 7936ad45a27SMark Brown return ret; 79499adef31SMark Brown } 79599adef31SMark Brown 79699adef31SMark Brown if (xfer->rx_buf != NULL) { 7976ad45a27SMark Brown ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 79899adef31SMark Brown xfer->rx_buf, xfer->len, 79999adef31SMark Brown DMA_FROM_DEVICE); 8006ad45a27SMark Brown if (ret != 0) { 8016ad45a27SMark Brown spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 8026ad45a27SMark Brown DMA_TO_DEVICE); 8036ad45a27SMark Brown return ret; 80499adef31SMark Brown } 80599adef31SMark Brown } 80699adef31SMark Brown } 80799adef31SMark Brown 80899adef31SMark Brown master->cur_msg_mapped = true; 80999adef31SMark Brown 81099adef31SMark Brown return 0; 81199adef31SMark Brown } 81299adef31SMark Brown 8134b786458SMartin Sperl static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 81499adef31SMark Brown { 81599adef31SMark Brown struct spi_transfer *xfer; 81699adef31SMark Brown struct device *tx_dev, *rx_dev; 
81799adef31SMark Brown 8186ad45a27SMark Brown if (!master->cur_msg_mapped || !master->can_dma) 81999adef31SMark Brown return 0; 82099adef31SMark Brown 821c37f45b5SLeilk Liu if (master->dma_tx) 8223fc25421SGeert Uytterhoeven tx_dev = master->dma_tx->device->dev; 823c37f45b5SLeilk Liu else 824c37f45b5SLeilk Liu tx_dev = &master->dev; 825c37f45b5SLeilk Liu 826c37f45b5SLeilk Liu if (master->dma_rx) 8273fc25421SGeert Uytterhoeven rx_dev = master->dma_rx->device->dev; 828c37f45b5SLeilk Liu else 829c37f45b5SLeilk Liu rx_dev = &master->dev; 83099adef31SMark Brown 83199adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 83299adef31SMark Brown if (!master->can_dma(master, msg->spi, xfer)) 83399adef31SMark Brown continue; 83499adef31SMark Brown 8356ad45a27SMark Brown spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 8366ad45a27SMark Brown spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 83799adef31SMark Brown } 83899adef31SMark Brown 83999adef31SMark Brown return 0; 84099adef31SMark Brown } 8412de440f5SGeert Uytterhoeven #else /* !CONFIG_HAS_DMA */ 8422de440f5SGeert Uytterhoeven static inline int __spi_map_msg(struct spi_master *master, 8432de440f5SGeert Uytterhoeven struct spi_message *msg) 8442de440f5SGeert Uytterhoeven { 8452de440f5SGeert Uytterhoeven return 0; 8462de440f5SGeert Uytterhoeven } 8472de440f5SGeert Uytterhoeven 8484b786458SMartin Sperl static inline int __spi_unmap_msg(struct spi_master *master, 8492de440f5SGeert Uytterhoeven struct spi_message *msg) 8502de440f5SGeert Uytterhoeven { 8512de440f5SGeert Uytterhoeven return 0; 8522de440f5SGeert Uytterhoeven } 8532de440f5SGeert Uytterhoeven #endif /* !CONFIG_HAS_DMA */ 8542de440f5SGeert Uytterhoeven 8554b786458SMartin Sperl static inline int spi_unmap_msg(struct spi_master *master, 8564b786458SMartin Sperl struct spi_message *msg) 8574b786458SMartin Sperl { 8584b786458SMartin Sperl struct spi_transfer *xfer; 8594b786458SMartin Sperl 8604b786458SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) { 8614b786458SMartin Sperl /* 8624b786458SMartin Sperl * Restore the original value of tx_buf or rx_buf if they are 8634b786458SMartin Sperl * NULL. 
8644b786458SMartin Sperl */
8654b786458SMartin Sperl if (xfer->tx_buf == master->dummy_tx)
8664b786458SMartin Sperl xfer->tx_buf = NULL;
8674b786458SMartin Sperl if (xfer->rx_buf == master->dummy_rx)
8684b786458SMartin Sperl xfer->rx_buf = NULL;
8694b786458SMartin Sperl }
8704b786458SMartin Sperl
8714b786458SMartin Sperl return __spi_unmap_msg(master, msg);
8724b786458SMartin Sperl }
8734b786458SMartin Sperl
8742de440f5SGeert Uytterhoeven static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
8752de440f5SGeert Uytterhoeven {
8762de440f5SGeert Uytterhoeven struct spi_transfer *xfer;
8772de440f5SGeert Uytterhoeven void *tmp;
8782de440f5SGeert Uytterhoeven unsigned int max_tx, max_rx;
8792de440f5SGeert Uytterhoeven
8802de440f5SGeert Uytterhoeven if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
8812de440f5SGeert Uytterhoeven max_tx = 0;
8822de440f5SGeert Uytterhoeven max_rx = 0;
8832de440f5SGeert Uytterhoeven
8842de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers, transfer_list) {
8852de440f5SGeert Uytterhoeven if ((master->flags & SPI_MASTER_MUST_TX) &&
8862de440f5SGeert Uytterhoeven !xfer->tx_buf)
8872de440f5SGeert Uytterhoeven max_tx = max(xfer->len, max_tx);
8882de440f5SGeert Uytterhoeven if ((master->flags & SPI_MASTER_MUST_RX) &&
8892de440f5SGeert Uytterhoeven !xfer->rx_buf)
8902de440f5SGeert Uytterhoeven max_rx = max(xfer->len, max_rx);
8912de440f5SGeert Uytterhoeven }
8922de440f5SGeert Uytterhoeven
8932de440f5SGeert Uytterhoeven if (max_tx) {
8942de440f5SGeert Uytterhoeven tmp = krealloc(master->dummy_tx, max_tx,
8952de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA);
8962de440f5SGeert Uytterhoeven if (!tmp)
8972de440f5SGeert Uytterhoeven return -ENOMEM;
8982de440f5SGeert Uytterhoeven master->dummy_tx = tmp;
8992de440f5SGeert Uytterhoeven memset(tmp, 0, max_tx);
9002de440f5SGeert Uytterhoeven }
9012de440f5SGeert Uytterhoeven
9022de440f5SGeert Uytterhoeven if (max_rx) {
9032de440f5SGeert Uytterhoeven tmp = krealloc(master->dummy_rx, max_rx,
9042de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA);
9052de440f5SGeert Uytterhoeven if (!tmp)
9062de440f5SGeert Uytterhoeven return -ENOMEM;
9072de440f5SGeert Uytterhoeven master->dummy_rx = tmp;
9082de440f5SGeert Uytterhoeven }
9092de440f5SGeert Uytterhoeven
9102de440f5SGeert Uytterhoeven if (max_tx || max_rx) {
9112de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers,
9122de440f5SGeert Uytterhoeven transfer_list) {
9132de440f5SGeert Uytterhoeven if (!xfer->tx_buf)
9142de440f5SGeert Uytterhoeven xfer->tx_buf = master->dummy_tx;
9152de440f5SGeert Uytterhoeven if (!xfer->rx_buf)
9162de440f5SGeert Uytterhoeven xfer->rx_buf = master->dummy_rx;
9172de440f5SGeert Uytterhoeven }
9182de440f5SGeert Uytterhoeven }
9192de440f5SGeert Uytterhoeven }
9202de440f5SGeert Uytterhoeven
9212de440f5SGeert Uytterhoeven return __spi_map_msg(master, msg);
9222de440f5SGeert Uytterhoeven }
92399adef31SMark Brown
924b158935fSMark Brown /*
925b158935fSMark Brown * spi_transfer_one_message - Default implementation of transfer_one_message()
926b158935fSMark Brown *
927b158935fSMark Brown * This is a standard implementation of transfer_one_message() for
928b158935fSMark Brown * drivers which implement a transfer_one() operation. It provides
929b158935fSMark Brown * standard handling of delays and chip select management.
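 *
 * A controller driver that relies on this default usually implements only
 * transfer_one() and signals completion from its interrupt handler. A rough
 * sketch, where the foo_* names are made up for illustration:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_hw_start(master, xfer);
 *		return 1;
 *	}
 *
 * A positive return value tells this function that the transfer is still in
 * flight; it then waits on master->xfer_completion, which the driver's
 * interrupt handler completes by calling spi_finalize_current_transfer().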
930b158935fSMark Brown */ 931b158935fSMark Brown static int spi_transfer_one_message(struct spi_master *master, 932b158935fSMark Brown struct spi_message *msg) 933b158935fSMark Brown { 934b158935fSMark Brown struct spi_transfer *xfer; 935b158935fSMark Brown bool keep_cs = false; 936b158935fSMark Brown int ret = 0; 937682a71b2SNicholas Mc Guire unsigned long ms = 1; 938eca2ebc7SMartin Sperl struct spi_statistics *statm = &master->statistics; 939eca2ebc7SMartin Sperl struct spi_statistics *stats = &msg->spi->statistics; 940b158935fSMark Brown 941b158935fSMark Brown spi_set_cs(msg->spi, true); 942b158935fSMark Brown 943eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 944eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 945eca2ebc7SMartin Sperl 946b158935fSMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 947b158935fSMark Brown trace_spi_transfer_start(msg, xfer); 948b158935fSMark Brown 949eca2ebc7SMartin Sperl spi_statistics_add_transfer_stats(statm, xfer, master); 950eca2ebc7SMartin Sperl spi_statistics_add_transfer_stats(stats, xfer, master); 951eca2ebc7SMartin Sperl 95238ec10f6SMark Brown if (xfer->tx_buf || xfer->rx_buf) { 95316735d02SWolfram Sang reinit_completion(&master->xfer_completion); 954b158935fSMark Brown 955b158935fSMark Brown ret = master->transfer_one(master, msg->spi, xfer); 956b158935fSMark Brown if (ret < 0) { 957eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, 958eca2ebc7SMartin Sperl errors); 959eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, 960eca2ebc7SMartin Sperl errors); 961b158935fSMark Brown dev_err(&msg->spi->dev, 962b158935fSMark Brown "SPI transfer failed: %d\n", ret); 963b158935fSMark Brown goto out; 964b158935fSMark Brown } 965b158935fSMark Brown 96613a42798SAxel Lin if (ret > 0) { 96713a42798SAxel Lin ret = 0; 96816a0ce4eSMark Brown ms = xfer->len * 8 * 1000 / xfer->speed_hz; 969eee668a9SHarini Katakam ms += ms + 100; /* some tolerance */ 97016a0ce4eSMark Brown 97116a0ce4eSMark Brown ms = wait_for_completion_timeout(&master->xfer_completion, 97216a0ce4eSMark Brown msecs_to_jiffies(ms)); 97316a0ce4eSMark Brown } 97416a0ce4eSMark Brown 97516a0ce4eSMark Brown if (ms == 0) { 976eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, 977eca2ebc7SMartin Sperl timedout); 978eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, 979eca2ebc7SMartin Sperl timedout); 98038ec10f6SMark Brown dev_err(&msg->spi->dev, 98138ec10f6SMark Brown "SPI transfer timed out\n"); 98216a0ce4eSMark Brown msg->status = -ETIMEDOUT; 98313a42798SAxel Lin } 98438ec10f6SMark Brown } else { 98538ec10f6SMark Brown if (xfer->len) 98638ec10f6SMark Brown dev_err(&msg->spi->dev, 98738ec10f6SMark Brown "Bufferless transfer has length %u\n", 98838ec10f6SMark Brown xfer->len); 98938ec10f6SMark Brown } 990b158935fSMark Brown 991b158935fSMark Brown trace_spi_transfer_stop(msg, xfer); 992b158935fSMark Brown 993b158935fSMark Brown if (msg->status != -EINPROGRESS) 994b158935fSMark Brown goto out; 995b158935fSMark Brown 996b158935fSMark Brown if (xfer->delay_usecs) 997b158935fSMark Brown udelay(xfer->delay_usecs); 998b158935fSMark Brown 999b158935fSMark Brown if (xfer->cs_change) { 1000b158935fSMark Brown if (list_is_last(&xfer->transfer_list, 1001b158935fSMark Brown &msg->transfers)) { 1002b158935fSMark Brown keep_cs = true; 1003b158935fSMark Brown } else { 10040b73aa63SMark Brown spi_set_cs(msg->spi, false); 10050b73aa63SMark Brown udelay(10); 10060b73aa63SMark Brown spi_set_cs(msg->spi, true); 1007b158935fSMark 
Brown }
1008b158935fSMark Brown }
1009b158935fSMark Brown
1010b158935fSMark Brown msg->actual_length += xfer->len;
1011b158935fSMark Brown }
1012b158935fSMark Brown
1013b158935fSMark Brown out:
1014b158935fSMark Brown if (ret != 0 || !keep_cs)
1015b158935fSMark Brown spi_set_cs(msg->spi, false);
1016b158935fSMark Brown
1017b158935fSMark Brown if (msg->status == -EINPROGRESS)
1018b158935fSMark Brown msg->status = ret;
1019b158935fSMark Brown
1020ff61eb42SGeert Uytterhoeven if (msg->status && master->handle_err)
1021b716c4ffSAndy Shevchenko master->handle_err(master, msg);
1022b716c4ffSAndy Shevchenko
1023b158935fSMark Brown spi_finalize_current_message(master);
1024b158935fSMark Brown
1025b158935fSMark Brown return ret;
1026b158935fSMark Brown }
1027b158935fSMark Brown
1028b158935fSMark Brown /**
1029b158935fSMark Brown * spi_finalize_current_transfer - report completion of a transfer
10302c675689SThierry Reding * @master: the master reporting completion
1031b158935fSMark Brown *
1032b158935fSMark Brown * Called by SPI drivers using the core transfer_one_message()
1033b158935fSMark Brown * implementation to notify it that the current interrupt driven
10349e8f4882SGeert Uytterhoeven * transfer has finished and the next one may be scheduled.
1035b158935fSMark Brown */
1036b158935fSMark Brown void spi_finalize_current_transfer(struct spi_master *master)
1037b158935fSMark Brown {
1038b158935fSMark Brown complete(&master->xfer_completion);
1039b158935fSMark Brown }
1040b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1041b158935fSMark Brown
1042ffbbdd21SLinus Walleij /**
1043fc9e0f71SMark Brown * __spi_pump_messages - function which processes the spi message queue
1044fc9e0f71SMark Brown * @master: master to process queue for
1045fc9e0f71SMark Brown * @in_kthread: true if we are in the context of the message pump thread
1046ffbbdd21SLinus Walleij *
1047ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that
1048ffbbdd21SLinus Walleij * needs processing and, if so, calls out to the driver to initialize hardware
1049ffbbdd21SLinus Walleij * and transfer each message.
1050ffbbdd21SLinus Walleij *
10510461a414SMark Brown * Note that it is called both from the kthread itself and from
10520461a414SMark Brown * inside spi_sync(); the queue extraction handling at the top of the
10530461a414SMark Brown * function should deal with this safely.
1054ffbbdd21SLinus Walleij */ 1055fc9e0f71SMark Brown static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1056ffbbdd21SLinus Walleij { 1057ffbbdd21SLinus Walleij unsigned long flags; 1058ffbbdd21SLinus Walleij bool was_busy = false; 1059ffbbdd21SLinus Walleij int ret; 1060ffbbdd21SLinus Walleij 1061983aee5dSMark Brown /* Lock queue */ 1062ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1063983aee5dSMark Brown 1064983aee5dSMark Brown /* Make sure we are not already running a message */ 1065983aee5dSMark Brown if (master->cur_msg) { 1066983aee5dSMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 1067983aee5dSMark Brown return; 1068983aee5dSMark Brown } 1069983aee5dSMark Brown 10700461a414SMark Brown /* If another context is idling the device then defer */ 10710461a414SMark Brown if (master->idling) { 10720461a414SMark Brown queue_kthread_work(&master->kworker, &master->pump_messages); 10730461a414SMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 10740461a414SMark Brown return; 10750461a414SMark Brown } 10760461a414SMark Brown 1077983aee5dSMark Brown /* Check if the queue is idle */ 1078ffbbdd21SLinus Walleij if (list_empty(&master->queue) || !master->running) { 1079b0b36b86SBryan Freed if (!master->busy) { 10809af4acc0SDan Carpenter spin_unlock_irqrestore(&master->queue_lock, flags); 1081ffbbdd21SLinus Walleij return; 1082ffbbdd21SLinus Walleij } 1083fc9e0f71SMark Brown 1084fc9e0f71SMark Brown /* Only do teardown in the thread */ 1085fc9e0f71SMark Brown if (!in_kthread) { 1086fc9e0f71SMark Brown queue_kthread_work(&master->kworker, 1087fc9e0f71SMark Brown &master->pump_messages); 1088ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1089fc9e0f71SMark Brown return; 1090fc9e0f71SMark Brown } 1091fc9e0f71SMark Brown 1092ffbbdd21SLinus Walleij master->busy = false; 10930461a414SMark Brown master->idling = true; 1094ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 10950461a414SMark Brown 10963a2eba9bSMark Brown kfree(master->dummy_rx); 10973a2eba9bSMark Brown master->dummy_rx = NULL; 10983a2eba9bSMark Brown kfree(master->dummy_tx); 10993a2eba9bSMark Brown master->dummy_tx = NULL; 1100b0b36b86SBryan Freed if (master->unprepare_transfer_hardware && 1101b0b36b86SBryan Freed master->unprepare_transfer_hardware(master)) 1102b0b36b86SBryan Freed dev_err(&master->dev, 1103b0b36b86SBryan Freed "failed to unprepare transfer hardware\n"); 110449834de2SMark Brown if (master->auto_runtime_pm) { 110549834de2SMark Brown pm_runtime_mark_last_busy(master->dev.parent); 110649834de2SMark Brown pm_runtime_put_autosuspend(master->dev.parent); 110749834de2SMark Brown } 110856ec1978SMark Brown trace_spi_master_idle(master); 1109ffbbdd21SLinus Walleij 11100461a414SMark Brown spin_lock_irqsave(&master->queue_lock, flags); 11110461a414SMark Brown master->idling = false; 1112ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1113ffbbdd21SLinus Walleij return; 1114ffbbdd21SLinus Walleij } 1115ffbbdd21SLinus Walleij 1116ffbbdd21SLinus Walleij /* Extract head of queue */ 1117ffbbdd21SLinus Walleij master->cur_msg = 1118a89e2d27SAxel Lin list_first_entry(&master->queue, struct spi_message, queue); 1119ffbbdd21SLinus Walleij 1120ffbbdd21SLinus Walleij list_del_init(&master->cur_msg->queue); 1121ffbbdd21SLinus Walleij if (master->busy) 1122ffbbdd21SLinus Walleij was_busy = true; 1123ffbbdd21SLinus Walleij else 1124ffbbdd21SLinus Walleij master->busy = true; 1125ffbbdd21SLinus Walleij 
spin_unlock_irqrestore(&master->queue_lock, flags); 1126ffbbdd21SLinus Walleij 112749834de2SMark Brown if (!was_busy && master->auto_runtime_pm) { 112849834de2SMark Brown ret = pm_runtime_get_sync(master->dev.parent); 112949834de2SMark Brown if (ret < 0) { 113049834de2SMark Brown dev_err(&master->dev, "Failed to power device: %d\n", 113149834de2SMark Brown ret); 113249834de2SMark Brown return; 113349834de2SMark Brown } 113449834de2SMark Brown } 113549834de2SMark Brown 113656ec1978SMark Brown if (!was_busy) 113756ec1978SMark Brown trace_spi_master_busy(master); 113856ec1978SMark Brown 11397dfd2bd7SShubhrajyoti D if (!was_busy && master->prepare_transfer_hardware) { 1140ffbbdd21SLinus Walleij ret = master->prepare_transfer_hardware(master); 1141ffbbdd21SLinus Walleij if (ret) { 1142ffbbdd21SLinus Walleij dev_err(&master->dev, 1143ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 114449834de2SMark Brown 114549834de2SMark Brown if (master->auto_runtime_pm) 114649834de2SMark Brown pm_runtime_put(master->dev.parent); 1147ffbbdd21SLinus Walleij return; 1148ffbbdd21SLinus Walleij } 1149ffbbdd21SLinus Walleij } 1150ffbbdd21SLinus Walleij 115156ec1978SMark Brown trace_spi_message_start(master->cur_msg); 115256ec1978SMark Brown 11532841a5fcSMark Brown if (master->prepare_message) { 11542841a5fcSMark Brown ret = master->prepare_message(master, master->cur_msg); 11552841a5fcSMark Brown if (ret) { 11562841a5fcSMark Brown dev_err(&master->dev, 11572841a5fcSMark Brown "failed to prepare message: %d\n", ret); 11582841a5fcSMark Brown master->cur_msg->status = ret; 11592841a5fcSMark Brown spi_finalize_current_message(master); 11602841a5fcSMark Brown return; 11612841a5fcSMark Brown } 11622841a5fcSMark Brown master->cur_msg_prepared = true; 11632841a5fcSMark Brown } 11642841a5fcSMark Brown 116599adef31SMark Brown ret = spi_map_msg(master, master->cur_msg); 116699adef31SMark Brown if (ret) { 116799adef31SMark Brown master->cur_msg->status = ret; 116899adef31SMark Brown spi_finalize_current_message(master); 116999adef31SMark Brown return; 117099adef31SMark Brown } 117199adef31SMark Brown 1172ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 1173ffbbdd21SLinus Walleij if (ret) { 1174ffbbdd21SLinus Walleij dev_err(&master->dev, 11751f802f82SGeert Uytterhoeven "failed to transfer one message from queue\n"); 1176ffbbdd21SLinus Walleij return; 1177ffbbdd21SLinus Walleij } 1178ffbbdd21SLinus Walleij } 1179ffbbdd21SLinus Walleij 1180fc9e0f71SMark Brown /** 1181fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 1182fc9e0f71SMark Brown * @work: pointer to kthread work struct contained in the master struct 1183fc9e0f71SMark Brown */ 1184fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1185fc9e0f71SMark Brown { 1186fc9e0f71SMark Brown struct spi_master *master = 1187fc9e0f71SMark Brown container_of(work, struct spi_master, pump_messages); 1188fc9e0f71SMark Brown 1189fc9e0f71SMark Brown __spi_pump_messages(master, true); 1190fc9e0f71SMark Brown } 1191fc9e0f71SMark Brown 1192ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master) 1193ffbbdd21SLinus Walleij { 1194ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1195ffbbdd21SLinus Walleij 1196ffbbdd21SLinus Walleij master->running = false; 1197ffbbdd21SLinus Walleij master->busy = false; 1198ffbbdd21SLinus Walleij 1199ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 
1200ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 1201f170168bSKees Cook &master->kworker, "%s", 1202ffbbdd21SLinus Walleij dev_name(&master->dev)); 1203ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 1204ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 120598a8f5a0SJarkko Nikula return PTR_ERR(master->kworker_task); 1206ffbbdd21SLinus Walleij } 1207ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 1208ffbbdd21SLinus Walleij 1209ffbbdd21SLinus Walleij /* 1210ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 1211ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1212ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1213ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 1214ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 1215ffbbdd21SLinus Walleij */ 1216ffbbdd21SLinus Walleij if (master->rt) { 1217ffbbdd21SLinus Walleij dev_info(&master->dev, 1218ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 1219ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1220ffbbdd21SLinus Walleij } 1221ffbbdd21SLinus Walleij 1222ffbbdd21SLinus Walleij return 0; 1223ffbbdd21SLinus Walleij } 1224ffbbdd21SLinus Walleij 1225ffbbdd21SLinus Walleij /** 1226ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1227ffbbdd21SLinus Walleij * messages 1228ffbbdd21SLinus Walleij * @master: the master to check for queued messages 1229ffbbdd21SLinus Walleij * 1230ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1231ffbbdd21SLinus Walleij * this call. 123297d56dc6SJavier Martinez Canillas * 123397d56dc6SJavier Martinez Canillas * Return: the next message in the queue, else NULL if the queue is empty. 1234ffbbdd21SLinus Walleij */ 1235ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1236ffbbdd21SLinus Walleij { 1237ffbbdd21SLinus Walleij struct spi_message *next; 1238ffbbdd21SLinus Walleij unsigned long flags; 1239ffbbdd21SLinus Walleij 1240ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 1241ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 12421cfd97f9SAxel Lin next = list_first_entry_or_null(&master->queue, struct spi_message, 12431cfd97f9SAxel Lin queue); 1244ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1245ffbbdd21SLinus Walleij 1246ffbbdd21SLinus Walleij return next; 1247ffbbdd21SLinus Walleij } 1248ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1249ffbbdd21SLinus Walleij 1250ffbbdd21SLinus Walleij /** 1251ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 1252ffbbdd21SLinus Walleij * @master: the master to return the message to 1253ffbbdd21SLinus Walleij * 1254ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 1255ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue. 
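 *
 * As an illustration only, a queued controller driver's transfer_one_message()
 * implementation typically ends with this call once all transfers have been
 * carried out. The foo_ prefix below is a made-up driver name and the loop
 * body merely stands in for the driver's real hardware I/O:
 *
 *	static int foo_transfer_one_message(struct spi_master *master,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list)
 *			msg->actual_length += xfer->len;
 *
 *		msg->status = 0;
 *		spi_finalize_current_message(master);
 *		return 0;
 *	}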
1256ffbbdd21SLinus Walleij */ 1257ffbbdd21SLinus Walleij void spi_finalize_current_message(struct spi_master *master) 1258ffbbdd21SLinus Walleij { 1259ffbbdd21SLinus Walleij struct spi_message *mesg; 1260ffbbdd21SLinus Walleij unsigned long flags; 12612841a5fcSMark Brown int ret; 1262ffbbdd21SLinus Walleij 1263ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1264ffbbdd21SLinus Walleij mesg = master->cur_msg; 1265ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1266ffbbdd21SLinus Walleij 126799adef31SMark Brown spi_unmap_msg(master, mesg); 126899adef31SMark Brown 12692841a5fcSMark Brown if (master->cur_msg_prepared && master->unprepare_message) { 12702841a5fcSMark Brown ret = master->unprepare_message(master, mesg); 12712841a5fcSMark Brown if (ret) { 12722841a5fcSMark Brown dev_err(&master->dev, 12732841a5fcSMark Brown "failed to unprepare message: %d\n", ret); 12742841a5fcSMark Brown } 12752841a5fcSMark Brown } 1276391949b6SUwe Kleine-König 12778e76ef88SMartin Sperl spin_lock_irqsave(&master->queue_lock, flags); 12788e76ef88SMartin Sperl master->cur_msg = NULL; 12792841a5fcSMark Brown master->cur_msg_prepared = false; 12808e76ef88SMartin Sperl queue_kthread_work(&master->kworker, &master->pump_messages); 12818e76ef88SMartin Sperl spin_unlock_irqrestore(&master->queue_lock, flags); 12828e76ef88SMartin Sperl 12838e76ef88SMartin Sperl trace_spi_message_done(mesg); 12842841a5fcSMark Brown 1285ffbbdd21SLinus Walleij mesg->state = NULL; 1286ffbbdd21SLinus Walleij if (mesg->complete) 1287ffbbdd21SLinus Walleij mesg->complete(mesg->context); 1288ffbbdd21SLinus Walleij } 1289ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1290ffbbdd21SLinus Walleij 1291ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master) 1292ffbbdd21SLinus Walleij { 1293ffbbdd21SLinus Walleij unsigned long flags; 1294ffbbdd21SLinus Walleij 1295ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1296ffbbdd21SLinus Walleij 1297ffbbdd21SLinus Walleij if (master->running || master->busy) { 1298ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1299ffbbdd21SLinus Walleij return -EBUSY; 1300ffbbdd21SLinus Walleij } 1301ffbbdd21SLinus Walleij 1302ffbbdd21SLinus Walleij master->running = true; 1303ffbbdd21SLinus Walleij master->cur_msg = NULL; 1304ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1305ffbbdd21SLinus Walleij 1306ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1307ffbbdd21SLinus Walleij 1308ffbbdd21SLinus Walleij return 0; 1309ffbbdd21SLinus Walleij } 1310ffbbdd21SLinus Walleij 1311ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 1312ffbbdd21SLinus Walleij { 1313ffbbdd21SLinus Walleij unsigned long flags; 1314ffbbdd21SLinus Walleij unsigned limit = 500; 1315ffbbdd21SLinus Walleij int ret = 0; 1316ffbbdd21SLinus Walleij 1317ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1318ffbbdd21SLinus Walleij 1319ffbbdd21SLinus Walleij /* 1320ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 1321ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 1322ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1323ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 
1324ffbbdd21SLinus Walleij */ 1325ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 1326ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1327f97b26b0SAxel Lin usleep_range(10000, 11000); 1328ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1329ffbbdd21SLinus Walleij } 1330ffbbdd21SLinus Walleij 1331ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 1332ffbbdd21SLinus Walleij ret = -EBUSY; 1333ffbbdd21SLinus Walleij else 1334ffbbdd21SLinus Walleij master->running = false; 1335ffbbdd21SLinus Walleij 1336ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1337ffbbdd21SLinus Walleij 1338ffbbdd21SLinus Walleij if (ret) { 1339ffbbdd21SLinus Walleij dev_warn(&master->dev, 1340ffbbdd21SLinus Walleij "could not stop message queue\n"); 1341ffbbdd21SLinus Walleij return ret; 1342ffbbdd21SLinus Walleij } 1343ffbbdd21SLinus Walleij return ret; 1344ffbbdd21SLinus Walleij } 1345ffbbdd21SLinus Walleij 1346ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 1347ffbbdd21SLinus Walleij { 1348ffbbdd21SLinus Walleij int ret; 1349ffbbdd21SLinus Walleij 1350ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1351ffbbdd21SLinus Walleij 1352ffbbdd21SLinus Walleij /* 1353ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 1354ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 1355ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 1356ffbbdd21SLinus Walleij * return anyway. 1357ffbbdd21SLinus Walleij */ 1358ffbbdd21SLinus Walleij if (ret) { 1359ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 1360ffbbdd21SLinus Walleij return ret; 1361ffbbdd21SLinus Walleij } 1362ffbbdd21SLinus Walleij 1363ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 1364ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 1365ffbbdd21SLinus Walleij 1366ffbbdd21SLinus Walleij return 0; 1367ffbbdd21SLinus Walleij } 1368ffbbdd21SLinus Walleij 13690461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi, 13700461a414SMark Brown struct spi_message *msg, 13710461a414SMark Brown bool need_pump) 1372ffbbdd21SLinus Walleij { 1373ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 1374ffbbdd21SLinus Walleij unsigned long flags; 1375ffbbdd21SLinus Walleij 1376ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1377ffbbdd21SLinus Walleij 1378ffbbdd21SLinus Walleij if (!master->running) { 1379ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1380ffbbdd21SLinus Walleij return -ESHUTDOWN; 1381ffbbdd21SLinus Walleij } 1382ffbbdd21SLinus Walleij msg->actual_length = 0; 1383ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 1384ffbbdd21SLinus Walleij 1385ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 13860461a414SMark Brown if (!master->busy && need_pump) 1387ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1388ffbbdd21SLinus Walleij 1389ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1390ffbbdd21SLinus Walleij return 0; 1391ffbbdd21SLinus Walleij } 1392ffbbdd21SLinus Walleij 13930461a414SMark Brown /** 13940461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers 13950461a414SMark Brown * @spi: spi device which is requesting transfer 13960461a414SMark Brown * @msg: spi message 
which is to be handled; it is queued to the driver queue 139797d56dc6SJavier Martinez Canillas * 139897d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 13990461a414SMark Brown */ 14000461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 14010461a414SMark Brown { 14020461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 14030461a414SMark Brown } 14040461a414SMark Brown 1405ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 1406ffbbdd21SLinus Walleij { 1407ffbbdd21SLinus Walleij int ret; 1408ffbbdd21SLinus Walleij 1409ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 1410b158935fSMark Brown if (!master->transfer_one_message) 1411b158935fSMark Brown master->transfer_one_message = spi_transfer_one_message; 1412ffbbdd21SLinus Walleij 1413ffbbdd21SLinus Walleij /* Initialize and start queue */ 1414ffbbdd21SLinus Walleij ret = spi_init_queue(master); 1415ffbbdd21SLinus Walleij if (ret) { 1416ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 1417ffbbdd21SLinus Walleij goto err_init_queue; 1418ffbbdd21SLinus Walleij } 1419c3676d5cSMark Brown master->queued = true; 1420ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1421ffbbdd21SLinus Walleij if (ret) { 1422ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 1423ffbbdd21SLinus Walleij goto err_start_queue; 1424ffbbdd21SLinus Walleij } 1425ffbbdd21SLinus Walleij 1426ffbbdd21SLinus Walleij return 0; 1427ffbbdd21SLinus Walleij 1428ffbbdd21SLinus Walleij err_start_queue: 1429ffbbdd21SLinus Walleij spi_destroy_queue(master); 1430c3676d5cSMark Brown err_init_queue: 1431ffbbdd21SLinus Walleij return ret; 1432ffbbdd21SLinus Walleij } 1433ffbbdd21SLinus Walleij 1434ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1435ffbbdd21SLinus Walleij 14367cb94361SAndreas Larsson #if defined(CONFIG_OF) 1437aff5e3f8SPantelis Antoniou static struct spi_device * 1438aff5e3f8SPantelis Antoniou of_register_spi_device(struct spi_master *master, struct device_node *nc) 1439d57a4282SGrant Likely { 1440d57a4282SGrant Likely struct spi_device *spi; 1441d57a4282SGrant Likely int rc; 144289da4293STrent Piepho u32 value; 1443d57a4282SGrant Likely 1444d57a4282SGrant Likely /* Alloc an spi_device */ 1445d57a4282SGrant Likely spi = spi_alloc_device(master); 1446d57a4282SGrant Likely if (!spi) { 1447d57a4282SGrant Likely dev_err(&master->dev, "spi_device alloc error for %s\n", 1448d57a4282SGrant Likely nc->full_name); 1449aff5e3f8SPantelis Antoniou rc = -ENOMEM; 1450aff5e3f8SPantelis Antoniou goto err_out; 1451d57a4282SGrant Likely } 1452d57a4282SGrant Likely 1453d57a4282SGrant Likely /* Select device driver */ 1454aff5e3f8SPantelis Antoniou rc = of_modalias_node(nc, spi->modalias, 1455aff5e3f8SPantelis Antoniou sizeof(spi->modalias)); 1456aff5e3f8SPantelis Antoniou if (rc < 0) { 1457d57a4282SGrant Likely dev_err(&master->dev, "cannot find modalias for %s\n", 1458d57a4282SGrant Likely nc->full_name); 1459aff5e3f8SPantelis Antoniou goto err_out; 1460d57a4282SGrant Likely } 1461d57a4282SGrant Likely 1462d57a4282SGrant Likely /* Device address */ 146389da4293STrent Piepho rc = of_property_read_u32(nc, "reg", &value); 146489da4293STrent Piepho if (rc) { 146589da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 146689da4293STrent Piepho nc->full_name, rc); 1467aff5e3f8SPantelis Antoniou goto err_out; 1468d57a4282SGrant
Likely } 146989da4293STrent Piepho spi->chip_select = value; 1470d57a4282SGrant Likely 1471d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) */ 1472d57a4282SGrant Likely if (of_find_property(nc, "spi-cpha", NULL)) 1473d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1474d57a4282SGrant Likely if (of_find_property(nc, "spi-cpol", NULL)) 1475d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1476d57a4282SGrant Likely if (of_find_property(nc, "spi-cs-high", NULL)) 1477d57a4282SGrant Likely spi->mode |= SPI_CS_HIGH; 1478c20151dfSLars-Peter Clausen if (of_find_property(nc, "spi-3wire", NULL)) 1479c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1480cd6339e6SZhao Qiang if (of_find_property(nc, "spi-lsb-first", NULL)) 1481cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 1482d57a4282SGrant Likely 1483f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 148489da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 148589da4293STrent Piepho switch (value) { 148689da4293STrent Piepho case 1: 1487f477b7fbSwangyuhang break; 148889da4293STrent Piepho case 2: 1489f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1490f477b7fbSwangyuhang break; 149189da4293STrent Piepho case 4: 1492f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1493f477b7fbSwangyuhang break; 1494f477b7fbSwangyuhang default: 149580874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1496a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 149789da4293STrent Piepho value); 149880874d8cSGeert Uytterhoeven break; 1499f477b7fbSwangyuhang } 1500a822e99cSMark Brown } 1501f477b7fbSwangyuhang 150289da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 150389da4293STrent Piepho switch (value) { 150489da4293STrent Piepho case 1: 1505f477b7fbSwangyuhang break; 150689da4293STrent Piepho case 2: 1507f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1508f477b7fbSwangyuhang break; 150989da4293STrent Piepho case 4: 1510f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1511f477b7fbSwangyuhang break; 1512f477b7fbSwangyuhang default: 151380874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1514a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 151589da4293STrent Piepho value); 151680874d8cSGeert Uytterhoeven break; 1517f477b7fbSwangyuhang } 1518a822e99cSMark Brown } 1519f477b7fbSwangyuhang 1520d57a4282SGrant Likely /* Device speed */ 152189da4293STrent Piepho rc = of_property_read_u32(nc, "spi-max-frequency", &value); 152289da4293STrent Piepho if (rc) { 152389da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 152489da4293STrent Piepho nc->full_name, rc); 1525aff5e3f8SPantelis Antoniou goto err_out; 1526d57a4282SGrant Likely } 152789da4293STrent Piepho spi->max_speed_hz = value; 1528d57a4282SGrant Likely 1529d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 1530d57a4282SGrant Likely of_node_get(nc); 1531d57a4282SGrant Likely spi->dev.of_node = nc; 1532d57a4282SGrant Likely 1533d57a4282SGrant Likely /* Register the new device */ 1534d57a4282SGrant Likely rc = spi_add_device(spi); 1535d57a4282SGrant Likely if (rc) { 1536d57a4282SGrant Likely dev_err(&master->dev, "spi_device register error %s\n", 1537d57a4282SGrant Likely nc->full_name); 1538aff5e3f8SPantelis Antoniou goto err_out; 1539d57a4282SGrant Likely } 1540d57a4282SGrant Likely 1541aff5e3f8SPantelis Antoniou return spi; 1542aff5e3f8SPantelis Antoniou 1543aff5e3f8SPantelis Antoniou err_out: 1544aff5e3f8SPantelis Antoniou spi_dev_put(spi); 1545aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 
1546aff5e3f8SPantelis Antoniou } 1547aff5e3f8SPantelis Antoniou 1548aff5e3f8SPantelis Antoniou /** 1549aff5e3f8SPantelis Antoniou * of_register_spi_devices() - Register child devices onto the SPI bus 1550aff5e3f8SPantelis Antoniou * @master: Pointer to spi_master device 1551aff5e3f8SPantelis Antoniou * 1552aff5e3f8SPantelis Antoniou * Registers an spi_device for each child node of master node which has a 'reg' 1553aff5e3f8SPantelis Antoniou * property. 1554aff5e3f8SPantelis Antoniou */ 1555aff5e3f8SPantelis Antoniou static void of_register_spi_devices(struct spi_master *master) 1556aff5e3f8SPantelis Antoniou { 1557aff5e3f8SPantelis Antoniou struct spi_device *spi; 1558aff5e3f8SPantelis Antoniou struct device_node *nc; 1559aff5e3f8SPantelis Antoniou 1560aff5e3f8SPantelis Antoniou if (!master->dev.of_node) 1561aff5e3f8SPantelis Antoniou return; 1562aff5e3f8SPantelis Antoniou 1563aff5e3f8SPantelis Antoniou for_each_available_child_of_node(master->dev.of_node, nc) { 1564aff5e3f8SPantelis Antoniou spi = of_register_spi_device(master, nc); 1565aff5e3f8SPantelis Antoniou if (IS_ERR(spi)) 1566aff5e3f8SPantelis Antoniou dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1567aff5e3f8SPantelis Antoniou nc->full_name); 1568d57a4282SGrant Likely } 1569d57a4282SGrant Likely } 1570d57a4282SGrant Likely #else 1571d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) { } 1572d57a4282SGrant Likely #endif 1573d57a4282SGrant Likely 157464bee4d2SMika Westerberg #ifdef CONFIG_ACPI 157564bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 157664bee4d2SMika Westerberg { 157764bee4d2SMika Westerberg struct spi_device *spi = data; 157864bee4d2SMika Westerberg 157964bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 158064bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 158164bee4d2SMika Westerberg 158264bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 158364bee4d2SMika Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 158464bee4d2SMika Westerberg spi->chip_select = sb->device_selection; 158564bee4d2SMika Westerberg spi->max_speed_hz = sb->connection_speed; 158664bee4d2SMika Westerberg 158764bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 158864bee4d2SMika Westerberg spi->mode |= SPI_CPHA; 158964bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 159064bee4d2SMika Westerberg spi->mode |= SPI_CPOL; 159164bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 159264bee4d2SMika Westerberg spi->mode |= SPI_CS_HIGH; 159364bee4d2SMika Westerberg } 159464bee4d2SMika Westerberg } else if (spi->irq < 0) { 159564bee4d2SMika Westerberg struct resource r; 159664bee4d2SMika Westerberg 159764bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 159864bee4d2SMika Westerberg spi->irq = r.start; 159964bee4d2SMika Westerberg } 160064bee4d2SMika Westerberg 160164bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 160264bee4d2SMika Westerberg return 1; 160364bee4d2SMika Westerberg } 160464bee4d2SMika Westerberg 160564bee4d2SMika Westerberg static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 160664bee4d2SMika Westerberg void *data, void **return_value) 160764bee4d2SMika Westerberg { 160864bee4d2SMika Westerberg struct spi_master *master = data; 160964bee4d2SMika Westerberg struct list_head resource_list; 161064bee4d2SMika Westerberg struct acpi_device *adev; 161164bee4d2SMika Westerberg 
struct spi_device *spi; 161264bee4d2SMika Westerberg int ret; 161364bee4d2SMika Westerberg 161464bee4d2SMika Westerberg if (acpi_bus_get_device(handle, &adev)) 161564bee4d2SMika Westerberg return AE_OK; 161664bee4d2SMika Westerberg if (acpi_bus_get_status(adev) || !adev->status.present) 161764bee4d2SMika Westerberg return AE_OK; 161864bee4d2SMika Westerberg 161964bee4d2SMika Westerberg spi = spi_alloc_device(master); 162064bee4d2SMika Westerberg if (!spi) { 162164bee4d2SMika Westerberg dev_err(&master->dev, "failed to allocate SPI device for %s\n", 162264bee4d2SMika Westerberg dev_name(&adev->dev)); 162364bee4d2SMika Westerberg return AE_NO_MEMORY; 162464bee4d2SMika Westerberg } 162564bee4d2SMika Westerberg 16267b199811SRafael J. Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 162764bee4d2SMika Westerberg spi->irq = -1; 162864bee4d2SMika Westerberg 162964bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 163064bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 163164bee4d2SMika Westerberg acpi_spi_add_resource, spi); 163264bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 163364bee4d2SMika Westerberg 163464bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 163564bee4d2SMika Westerberg spi_dev_put(spi); 163664bee4d2SMika Westerberg return AE_OK; 163764bee4d2SMika Westerberg } 163864bee4d2SMika Westerberg 163933cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 1640cf9eb39cSJarkko Nikula strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 164164bee4d2SMika Westerberg if (spi_add_device(spi)) { 164233cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 164364bee4d2SMika Westerberg dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 164464bee4d2SMika Westerberg dev_name(&adev->dev)); 164564bee4d2SMika Westerberg spi_dev_put(spi); 164664bee4d2SMika Westerberg } 164764bee4d2SMika Westerberg 164864bee4d2SMika Westerberg return AE_OK; 164964bee4d2SMika Westerberg } 165064bee4d2SMika Westerberg 165164bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master) 165264bee4d2SMika Westerberg { 165364bee4d2SMika Westerberg acpi_status status; 165464bee4d2SMika Westerberg acpi_handle handle; 165564bee4d2SMika Westerberg 165629896178SRafael J. 
Wysocki handle = ACPI_HANDLE(master->dev.parent); 165764bee4d2SMika Westerberg if (!handle) 165864bee4d2SMika Westerberg return; 165964bee4d2SMika Westerberg 166064bee4d2SMika Westerberg status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 166164bee4d2SMika Westerberg acpi_spi_add_device, NULL, 166264bee4d2SMika Westerberg master, NULL); 166364bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 166464bee4d2SMika Westerberg dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 166564bee4d2SMika Westerberg } 166664bee4d2SMika Westerberg #else 166764bee4d2SMika Westerberg static inline void acpi_register_spi_devices(struct spi_master *master) {} 166864bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 166964bee4d2SMika Westerberg 167049dce689STony Jones static void spi_master_release(struct device *dev) 16718ae12a0dSDavid Brownell { 16728ae12a0dSDavid Brownell struct spi_master *master; 16738ae12a0dSDavid Brownell 167449dce689STony Jones master = container_of(dev, struct spi_master, dev); 16758ae12a0dSDavid Brownell kfree(master); 16768ae12a0dSDavid Brownell } 16778ae12a0dSDavid Brownell 16788ae12a0dSDavid Brownell static struct class spi_master_class = { 16798ae12a0dSDavid Brownell .name = "spi_master", 16808ae12a0dSDavid Brownell .owner = THIS_MODULE, 168149dce689STony Jones .dev_release = spi_master_release, 1682eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 16838ae12a0dSDavid Brownell }; 16848ae12a0dSDavid Brownell 16858ae12a0dSDavid Brownell 16868ae12a0dSDavid Brownell /** 16878ae12a0dSDavid Brownell * spi_alloc_master - allocate SPI master controller 16888ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 168933e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 169049dce689STony Jones * memory is in the driver_data field of the returned device, 16910c868461SDavid Brownell * accessible with spi_master_get_devdata(). 169233e34dc6SDavid Brownell * Context: can sleep 16938ae12a0dSDavid Brownell * 16948ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 16958ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 1696ba1a0513Sdmitry pervushin * an spi_master structure, prior to calling spi_register_master(). 16978ae12a0dSDavid Brownell * 169897d56dc6SJavier Martinez Canillas * This must be called from context that can sleep. 16998ae12a0dSDavid Brownell * 17008ae12a0dSDavid Brownell * The caller is responsible for assigning the bus number and initializing 1701ba1a0513Sdmitry pervushin * the master's methods before calling spi_register_master(); and (after errors 1702a394d635SGuenter Roeck * adding the device) calling spi_master_put() to prevent a memory leak. 170397d56dc6SJavier Martinez Canillas * 170497d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 
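 *
 * A minimal probe-time sketch, assuming a hypothetical platform driver
 * (struct foo_priv and foo_probe() are illustrative names, not definitions
 * from this file):
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct foo_priv *priv;
 *		int status;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		priv = spi_master_get_devdata(master);
 *		platform_set_drvdata(pdev, priv);
 *		master->num_chipselect = 1;
 *
 *		status = spi_register_master(master);
 *		if (status)
 *			spi_master_put(master);
 *		return status;
 *	}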
17058ae12a0dSDavid Brownell */ 1706e9d5a461SAdrian Bunk struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 17078ae12a0dSDavid Brownell { 17088ae12a0dSDavid Brownell struct spi_master *master; 17098ae12a0dSDavid Brownell 17100c868461SDavid Brownell if (!dev) 17110c868461SDavid Brownell return NULL; 17120c868461SDavid Brownell 17135fe5f05eSJingoo Han master = kzalloc(size + sizeof(*master), GFP_KERNEL); 17148ae12a0dSDavid Brownell if (!master) 17158ae12a0dSDavid Brownell return NULL; 17168ae12a0dSDavid Brownell 171749dce689STony Jones device_initialize(&master->dev); 17181e8a52e1SGrant Likely master->bus_num = -1; 17191e8a52e1SGrant Likely master->num_chipselect = 1; 172049dce689STony Jones master->dev.class = &spi_master_class; 172149dce689STony Jones master->dev.parent = get_device(dev); 17220c868461SDavid Brownell spi_master_set_devdata(master, &master[1]); 17238ae12a0dSDavid Brownell 17248ae12a0dSDavid Brownell return master; 17258ae12a0dSDavid Brownell } 17268ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_alloc_master); 17278ae12a0dSDavid Brownell 172874317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 172974317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 173074317984SJean-Christophe PLAGNIOL-VILLARD { 1731e80beb27SGrant Likely int nb, i, *cs; 173274317984SJean-Christophe PLAGNIOL-VILLARD struct device_node *np = master->dev.of_node; 173374317984SJean-Christophe PLAGNIOL-VILLARD 173474317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 173574317984SJean-Christophe PLAGNIOL-VILLARD return 0; 173674317984SJean-Christophe PLAGNIOL-VILLARD 173774317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 17385fe5f05eSJingoo Han master->num_chipselect = max_t(int, nb, master->num_chipselect); 173974317984SJean-Christophe PLAGNIOL-VILLARD 17408ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 17418ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 174274317984SJean-Christophe PLAGNIOL-VILLARD return 0; 17438ec5d84eSAndreas Larsson else if (nb < 0) 17448ec5d84eSAndreas Larsson return nb; 174574317984SJean-Christophe PLAGNIOL-VILLARD 174674317984SJean-Christophe PLAGNIOL-VILLARD cs = devm_kzalloc(&master->dev, 174774317984SJean-Christophe PLAGNIOL-VILLARD sizeof(int) * master->num_chipselect, 174874317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 174974317984SJean-Christophe PLAGNIOL-VILLARD master->cs_gpios = cs; 175074317984SJean-Christophe PLAGNIOL-VILLARD 175174317984SJean-Christophe PLAGNIOL-VILLARD if (!master->cs_gpios) 175274317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 175374317984SJean-Christophe PLAGNIOL-VILLARD 17540da83bb1SAndreas Larsson for (i = 0; i < master->num_chipselect; i++) 1755446411e1SAndreas Larsson cs[i] = -ENOENT; 175674317984SJean-Christophe PLAGNIOL-VILLARD 175774317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 175874317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = of_get_named_gpio(np, "cs-gpios", i); 175974317984SJean-Christophe PLAGNIOL-VILLARD 176074317984SJean-Christophe PLAGNIOL-VILLARD return 0; 176174317984SJean-Christophe PLAGNIOL-VILLARD } 176274317984SJean-Christophe PLAGNIOL-VILLARD #else 176374317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 176474317984SJean-Christophe PLAGNIOL-VILLARD { 176574317984SJean-Christophe PLAGNIOL-VILLARD return 0; 176674317984SJean-Christophe PLAGNIOL-VILLARD } 176774317984SJean-Christophe 
PLAGNIOL-VILLARD #endif 176874317984SJean-Christophe PLAGNIOL-VILLARD 17698ae12a0dSDavid Brownell /** 17708ae12a0dSDavid Brownell * spi_register_master - register SPI master controller 17718ae12a0dSDavid Brownell * @master: initialized master, originally from spi_alloc_master() 177233e34dc6SDavid Brownell * Context: can sleep 17738ae12a0dSDavid Brownell * 17748ae12a0dSDavid Brownell * SPI master controllers connect to their drivers using some non-SPI bus, 17758ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 17768ae12a0dSDavid Brownell * includes calling spi_register_master() to hook up to this SPI bus glue. 17778ae12a0dSDavid Brownell * 17788ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 17798ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 17808ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 17818ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 17828ae12a0dSDavid Brownell * chip is at which address. 17838ae12a0dSDavid Brownell * 17848ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 17858ae12a0dSDavid Brownell * success, else a negative error code (dropping the master's refcount). 17860c868461SDavid Brownell * After a successful return, the caller is responsible for calling 17870c868461SDavid Brownell * spi_unregister_master(). 178897d56dc6SJavier Martinez Canillas * 178997d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 17908ae12a0dSDavid Brownell */ 1791e9d5a461SAdrian Bunk int spi_register_master(struct spi_master *master) 17928ae12a0dSDavid Brownell { 1793e44a45aeSDavid Brownell static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 179449dce689STony Jones struct device *dev = master->dev.parent; 17952b9603a0SFeng Tang struct boardinfo *bi; 17968ae12a0dSDavid Brownell int status = -ENODEV; 17978ae12a0dSDavid Brownell int dynamic = 0; 17988ae12a0dSDavid Brownell 17990c868461SDavid Brownell if (!dev) 18000c868461SDavid Brownell return -ENODEV; 18010c868461SDavid Brownell 180274317984SJean-Christophe PLAGNIOL-VILLARD status = of_spi_register_master(master); 180374317984SJean-Christophe PLAGNIOL-VILLARD if (status) 180474317984SJean-Christophe PLAGNIOL-VILLARD return status; 180574317984SJean-Christophe PLAGNIOL-VILLARD 1806082c8cb4SDavid Brownell /* even if it's just one always-selected device, there must 1807082c8cb4SDavid Brownell * be at least one chipselect 1808082c8cb4SDavid Brownell */ 1809082c8cb4SDavid Brownell if (master->num_chipselect == 0) 1810082c8cb4SDavid Brownell return -EINVAL; 1811082c8cb4SDavid Brownell 1812bb29785eSGrant Likely if ((master->bus_num < 0) && master->dev.of_node) 1813bb29785eSGrant Likely master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1814bb29785eSGrant Likely 18158ae12a0dSDavid Brownell /* convention: dynamically assigned bus IDs count down from the max */ 1816a020ed75SDavid Brownell if (master->bus_num < 0) { 1817082c8cb4SDavid Brownell /* FIXME switch to an IDR based scheme, something like 1818082c8cb4SDavid Brownell * I2C now uses, so we can't run out of "dynamic" IDs 1819082c8cb4SDavid Brownell */ 18208ae12a0dSDavid Brownell master->bus_num = atomic_dec_return(&dyn_bus_id); 1821b885244eSDavid Brownell dynamic = 1; 18228ae12a0dSDavid Brownell } 18238ae12a0dSDavid Brownell 18245424d43eSMark Brown INIT_LIST_HEAD(&master->queue); 
18255424d43eSMark Brown spin_lock_init(&master->queue_lock); 1826cf32b71eSErnst Schwab spin_lock_init(&master->bus_lock_spinlock); 1827cf32b71eSErnst Schwab mutex_init(&master->bus_lock_mutex); 1828cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1829b158935fSMark Brown init_completion(&master->xfer_completion); 18306ad45a27SMark Brown if (!master->max_dma_len) 18316ad45a27SMark Brown master->max_dma_len = INT_MAX; 1832cf32b71eSErnst Schwab 18338ae12a0dSDavid Brownell /* register the device, then userspace will see it. 18348ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 18358ae12a0dSDavid Brownell */ 183635f74fcaSKay Sievers dev_set_name(&master->dev, "spi%u", master->bus_num); 183749dce689STony Jones status = device_add(&master->dev); 1838b885244eSDavid Brownell if (status < 0) 18398ae12a0dSDavid Brownell goto done; 184035f74fcaSKay Sievers dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 18418ae12a0dSDavid Brownell dynamic ? " (dynamic)" : ""); 18428ae12a0dSDavid Brownell 1843ffbbdd21SLinus Walleij /* If we're using a queued driver, start the queue */ 1844ffbbdd21SLinus Walleij if (master->transfer) 1845ffbbdd21SLinus Walleij dev_info(dev, "master is unqueued, this is deprecated\n"); 1846ffbbdd21SLinus Walleij else { 1847ffbbdd21SLinus Walleij status = spi_master_initialize_queue(master); 1848ffbbdd21SLinus Walleij if (status) { 1849e93b0724SAxel Lin device_del(&master->dev); 1850ffbbdd21SLinus Walleij goto done; 1851ffbbdd21SLinus Walleij } 1852ffbbdd21SLinus Walleij } 1853eca2ebc7SMartin Sperl /* add statistics */ 1854eca2ebc7SMartin Sperl spin_lock_init(&master->statistics.lock); 1855ffbbdd21SLinus Walleij 18562b9603a0SFeng Tang mutex_lock(&board_lock); 18572b9603a0SFeng Tang list_add_tail(&master->list, &spi_master_list); 18582b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 18592b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 18602b9603a0SFeng Tang mutex_unlock(&board_lock); 18612b9603a0SFeng Tang 186264bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 186312b15e83SAnatolij Gustschin of_register_spi_devices(master); 186464bee4d2SMika Westerberg acpi_register_spi_devices(master); 18658ae12a0dSDavid Brownell done: 18668ae12a0dSDavid Brownell return status; 18678ae12a0dSDavid Brownell } 18688ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_master); 18698ae12a0dSDavid Brownell 1870666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 1871666d5b4cSMark Brown { 1872666d5b4cSMark Brown spi_unregister_master(*(struct spi_master **)res); 1873666d5b4cSMark Brown } 1874666d5b4cSMark Brown 1875666d5b4cSMark Brown /** 1876666d5b4cSMark Brown * devm_spi_register_master - register managed SPI master controller 1877666d5b4cSMark Brown * @dev: device managing SPI master 1878666d5b4cSMark Brown * @master: initialized master, originally from spi_alloc_master() 1879666d5b4cSMark Brown * Context: can sleep 1880666d5b4cSMark Brown * 1881666d5b4cSMark Brown * Register a SPI master as with spi_register_master(), which will 1882666d5b4cSMark Brown * automatically be unregistered when @dev is unbound. 188397d56dc6SJavier Martinez Canillas * 188497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code.
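 *
 * A short usage sketch, under the same hypothetical foo driver names as in
 * the spi_alloc_master() example above (not code from this file); the devres
 * core takes care of unregistering the master on driver detach:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int status;
 *
 *		master = spi_alloc_master(&pdev->dev, 0);
 *		if (!master)
 *			return -ENOMEM;
 *		master->num_chipselect = 1;
 *
 *		status = devm_spi_register_master(&pdev->dev, master);
 *		if (status)
 *			spi_master_put(master);
 *		return status;
 *	}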
1885666d5b4cSMark Brown */ 1886666d5b4cSMark Brown int devm_spi_register_master(struct device *dev, struct spi_master *master) 1887666d5b4cSMark Brown { 1888666d5b4cSMark Brown struct spi_master **ptr; 1889666d5b4cSMark Brown int ret; 1890666d5b4cSMark Brown 1891666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1892666d5b4cSMark Brown if (!ptr) 1893666d5b4cSMark Brown return -ENOMEM; 1894666d5b4cSMark Brown 1895666d5b4cSMark Brown ret = spi_register_master(master); 18964b92894eSStephen Warren if (!ret) { 1897666d5b4cSMark Brown *ptr = master; 1898666d5b4cSMark Brown devres_add(dev, ptr); 1899666d5b4cSMark Brown } else { 1900666d5b4cSMark Brown devres_free(ptr); 1901666d5b4cSMark Brown } 1902666d5b4cSMark Brown 1903666d5b4cSMark Brown return ret; 1904666d5b4cSMark Brown } 1905666d5b4cSMark Brown EXPORT_SYMBOL_GPL(devm_spi_register_master); 1906666d5b4cSMark Brown 190734860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 19088ae12a0dSDavid Brownell { 19090c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 19108ae12a0dSDavid Brownell return 0; 19118ae12a0dSDavid Brownell } 19128ae12a0dSDavid Brownell 19138ae12a0dSDavid Brownell /** 19148ae12a0dSDavid Brownell * spi_unregister_master - unregister SPI master controller 19158ae12a0dSDavid Brownell * @master: the master being unregistered 191633e34dc6SDavid Brownell * Context: can sleep 19178ae12a0dSDavid Brownell * 19188ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 19198ae12a0dSDavid Brownell * only ones directly touching chip registers. 19208ae12a0dSDavid Brownell * 19218ae12a0dSDavid Brownell * This must be called from context that can sleep. 19228ae12a0dSDavid Brownell */ 19238ae12a0dSDavid Brownell void spi_unregister_master(struct spi_master *master) 19248ae12a0dSDavid Brownell { 192589fc9a1aSJeff Garzik int dummy; 192689fc9a1aSJeff Garzik 1927ffbbdd21SLinus Walleij if (master->queued) { 1928ffbbdd21SLinus Walleij if (spi_destroy_queue(master)) 1929ffbbdd21SLinus Walleij dev_err(&master->dev, "queue remove failed\n"); 1930ffbbdd21SLinus Walleij } 1931ffbbdd21SLinus Walleij 19322b9603a0SFeng Tang mutex_lock(&board_lock); 19332b9603a0SFeng Tang list_del(&master->list); 19342b9603a0SFeng Tang mutex_unlock(&board_lock); 19352b9603a0SFeng Tang 193697dbf37dSSebastian Andrzej Siewior dummy = device_for_each_child(&master->dev, NULL, __unregister); 193749dce689STony Jones device_unregister(&master->dev); 19388ae12a0dSDavid Brownell } 19398ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_unregister_master); 19408ae12a0dSDavid Brownell 1941ffbbdd21SLinus Walleij int spi_master_suspend(struct spi_master *master) 1942ffbbdd21SLinus Walleij { 1943ffbbdd21SLinus Walleij int ret; 1944ffbbdd21SLinus Walleij 1945ffbbdd21SLinus Walleij /* Basically no-ops for non-queued masters */ 1946ffbbdd21SLinus Walleij if (!master->queued) 1947ffbbdd21SLinus Walleij return 0; 1948ffbbdd21SLinus Walleij 1949ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1950ffbbdd21SLinus Walleij if (ret) 1951ffbbdd21SLinus Walleij dev_err(&master->dev, "queue stop failed\n"); 1952ffbbdd21SLinus Walleij 1953ffbbdd21SLinus Walleij return ret; 1954ffbbdd21SLinus Walleij } 1955ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_suspend); 1956ffbbdd21SLinus Walleij 1957ffbbdd21SLinus Walleij int spi_master_resume(struct spi_master *master) 1958ffbbdd21SLinus Walleij { 1959ffbbdd21SLinus Walleij int ret; 1960ffbbdd21SLinus Walleij 1961ffbbdd21SLinus Walleij if 
(!master->queued) 1962ffbbdd21SLinus Walleij return 0; 1963ffbbdd21SLinus Walleij 1964ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1965ffbbdd21SLinus Walleij if (ret) 1966ffbbdd21SLinus Walleij dev_err(&master->dev, "queue restart failed\n"); 1967ffbbdd21SLinus Walleij 1968ffbbdd21SLinus Walleij return ret; 1969ffbbdd21SLinus Walleij } 1970ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_resume); 1971ffbbdd21SLinus Walleij 19729f3b795aSMichał Mirosław static int __spi_master_match(struct device *dev, const void *data) 19735ed2c832SDave Young { 19745ed2c832SDave Young struct spi_master *m; 19759f3b795aSMichał Mirosław const u16 *bus_num = data; 19765ed2c832SDave Young 19775ed2c832SDave Young m = container_of(dev, struct spi_master, dev); 19785ed2c832SDave Young return m->bus_num == *bus_num; 19795ed2c832SDave Young } 19805ed2c832SDave Young 19818ae12a0dSDavid Brownell /** 19828ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 19838ae12a0dSDavid Brownell * @bus_num: the master's bus number 198433e34dc6SDavid Brownell * Context: can sleep 19858ae12a0dSDavid Brownell * 19868ae12a0dSDavid Brownell * This call may be used with devices that are registered after 19878ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 19888ae12a0dSDavid Brownell * spi_master (which the caller must release), or NULL if there is 19898ae12a0dSDavid Brownell * no such master registered. 199097d56dc6SJavier Martinez Canillas * 199197d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 19928ae12a0dSDavid Brownell */ 19938ae12a0dSDavid Brownell struct spi_master *spi_busnum_to_master(u16 bus_num) 19948ae12a0dSDavid Brownell { 199549dce689STony Jones struct device *dev; 19961e9a51dcSAtsushi Nemoto struct spi_master *master = NULL; 19978ae12a0dSDavid Brownell 1998695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 19995ed2c832SDave Young __spi_master_match); 20005ed2c832SDave Young if (dev) 20015ed2c832SDave Young master = container_of(dev, struct spi_master, dev); 20025ed2c832SDave Young /* reference got in class_find_device */ 20031e9a51dcSAtsushi Nemoto return master; 20048ae12a0dSDavid Brownell } 20058ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 20068ae12a0dSDavid Brownell 20078ae12a0dSDavid Brownell 20088ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 20098ae12a0dSDavid Brownell 20107d077197SDavid Brownell /* Core methods for SPI master protocol drivers. Some of the 20117d077197SDavid Brownell * other core methods are currently defined as inline functions. 
20127d077197SDavid Brownell */ 20137d077197SDavid Brownell 201463ab645fSStefan Brüns static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 201563ab645fSStefan Brüns { 201663ab645fSStefan Brüns if (master->bits_per_word_mask) { 201763ab645fSStefan Brüns /* Only 32 bits fit in the mask */ 201863ab645fSStefan Brüns if (bits_per_word > 32) 201963ab645fSStefan Brüns return -EINVAL; 202063ab645fSStefan Brüns if (!(master->bits_per_word_mask & 202163ab645fSStefan Brüns SPI_BPW_MASK(bits_per_word))) 202263ab645fSStefan Brüns return -EINVAL; 202363ab645fSStefan Brüns } 202463ab645fSStefan Brüns 202563ab645fSStefan Brüns return 0; 202663ab645fSStefan Brüns } 202763ab645fSStefan Brüns 20287d077197SDavid Brownell /** 20297d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate 20307d077197SDavid Brownell * @spi: the device whose settings are being modified 20317d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device 20327d077197SDavid Brownell * 20337d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the 20347d077197SDavid Brownell * device doesn't work with its default. They may likewise need 20357d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function 20367d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep. 20377d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 20387d077197SDavid Brownell * effect the next time the device is selected and data is transferred to 20397d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected. 20407d077197SDavid Brownell * 20417d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option 20427d077197SDavid Brownell * that the underlying controller or its driver does not support. For 20437d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words, 20447d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects. 204597d56dc6SJavier Martinez Canillas * 204697d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
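 *
 * A brief usage sketch from a protocol driver's probe() (the foo_ name and
 * the chosen mode, word size and clock rate are only examples, not values
 * required by this file):
 *
 *	static int foo_spi_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}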
20477d077197SDavid Brownell */ 20487d077197SDavid Brownell int spi_setup(struct spi_device *spi) 20497d077197SDavid Brownell { 205083596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits; 20515ab8d262SAndy Shevchenko int status; 20527d077197SDavid Brownell 2053f477b7fbSwangyuhang /* check mode to prevent that DUAL and QUAD set at the same time 2054f477b7fbSwangyuhang */ 2055f477b7fbSwangyuhang if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2056f477b7fbSwangyuhang ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2057f477b7fbSwangyuhang dev_err(&spi->dev, 2058f477b7fbSwangyuhang "setup: can not select dual and quad at the same time\n"); 2059f477b7fbSwangyuhang return -EINVAL; 2060f477b7fbSwangyuhang } 2061f477b7fbSwangyuhang /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2062f477b7fbSwangyuhang */ 2063f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode & 2064f477b7fbSwangyuhang (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2065f477b7fbSwangyuhang return -EINVAL; 2066e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options 2067e7db06b5SDavid Brownell * that aren't supported with their current master 2068e7db06b5SDavid Brownell */ 2069e7db06b5SDavid Brownell bad_bits = spi->mode & ~spi->master->mode_bits; 207083596fbeSGeert Uytterhoeven ugly_bits = bad_bits & 207183596fbeSGeert Uytterhoeven (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 207283596fbeSGeert Uytterhoeven if (ugly_bits) { 207383596fbeSGeert Uytterhoeven dev_warn(&spi->dev, 207483596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n", 207583596fbeSGeert Uytterhoeven ugly_bits); 207683596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits; 207783596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits; 207883596fbeSGeert Uytterhoeven } 2079e7db06b5SDavid Brownell if (bad_bits) { 2080eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2081e7db06b5SDavid Brownell bad_bits); 2082e7db06b5SDavid Brownell return -EINVAL; 2083e7db06b5SDavid Brownell } 2084e7db06b5SDavid Brownell 20857d077197SDavid Brownell if (!spi->bits_per_word) 20867d077197SDavid Brownell spi->bits_per_word = 8; 20877d077197SDavid Brownell 20885ab8d262SAndy Shevchenko status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 20895ab8d262SAndy Shevchenko if (status) 20905ab8d262SAndy Shevchenko return status; 209163ab645fSStefan Brüns 2092052eb2d4SAxel Lin if (!spi->max_speed_hz) 2093052eb2d4SAxel Lin spi->max_speed_hz = spi->master->max_speed_hz; 2094052eb2d4SAxel Lin 2095caae070cSLaxman Dewangan if (spi->master->setup) 20967d077197SDavid Brownell status = spi->master->setup(spi); 20977d077197SDavid Brownell 2098abeedb01SFranklin S Cooper Jr spi_set_cs(spi, false); 2099abeedb01SFranklin S Cooper Jr 21005fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 21017d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 21027d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 21037d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 21047d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 21057d077197SDavid Brownell (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 21067d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 21077d077197SDavid Brownell status); 21087d077197SDavid Brownell 21097d077197SDavid Brownell return status; 21107d077197SDavid Brownell } 21117d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 21127d077197SDavid Brownell 211390808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2114cf32b71eSErnst Schwab { 2115cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2116e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 21176ea31293SAtsushi Nemoto int w_size; 2118cf32b71eSErnst Schwab 211924a0013aSMark Brown if (list_empty(&message->transfers)) 212024a0013aSMark Brown return -EINVAL; 212124a0013aSMark Brown 2122cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 2123cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 2124cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 2125cf32b71eSErnst Schwab * software limitations. 2126cf32b71eSErnst Schwab */ 2127cf32b71eSErnst Schwab if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2128cf32b71eSErnst Schwab || (spi->mode & SPI_3WIRE)) { 2129cf32b71eSErnst Schwab unsigned flags = master->flags; 2130cf32b71eSErnst Schwab 2131cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 2132cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 2133cf32b71eSErnst Schwab return -EINVAL; 2134cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2135cf32b71eSErnst Schwab return -EINVAL; 2136cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2137cf32b71eSErnst Schwab return -EINVAL; 2138cf32b71eSErnst Schwab } 2139cf32b71eSErnst Schwab } 2140cf32b71eSErnst Schwab 2141e6811d1dSLaxman Dewangan /** 2142059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 2143059b8ffeSLaxman Dewangan * it is not set for this transfer. 2144f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 2145f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2146e6811d1dSLaxman Dewangan */ 2147e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 2148078726ceSSourav Poddar message->frame_length += xfer->len; 2149e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 2150e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 2151a6f87fadSAxel Lin 2152a6f87fadSAxel Lin if (!xfer->speed_hz) 2153059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 21547dc9fbc3SMark Brown if (!xfer->speed_hz) 21557dc9fbc3SMark Brown xfer->speed_hz = master->max_speed_hz; 2156a6f87fadSAxel Lin 215756ede94aSGabor Juhos if (master->max_speed_hz && 215856ede94aSGabor Juhos xfer->speed_hz > master->max_speed_hz) 215956ede94aSGabor Juhos xfer->speed_hz = master->max_speed_hz; 216056ede94aSGabor Juhos 216163ab645fSStefan Brüns if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2162543bb255SStephen Warren return -EINVAL; 2163a2fd4f9fSMark Brown 21644d94bd21SIvan T. Ivanov /* 21654d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 21664d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 21674d94bd21SIvan T. Ivanov */ 21684d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 21694d94bd21SIvan T. Ivanov w_size = 1; 21704d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 21714d94bd21SIvan T. 
Ivanov w_size = 2; 21724d94bd21SIvan T. Ivanov else 21734d94bd21SIvan T. Ivanov w_size = 4; 21744d94bd21SIvan T. Ivanov 21754d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 21766ea31293SAtsushi Nemoto if (xfer->len % w_size) 21774d94bd21SIvan T. Ivanov return -EINVAL; 21784d94bd21SIvan T. Ivanov 2179a2fd4f9fSMark Brown if (xfer->speed_hz && master->min_speed_hz && 2180a2fd4f9fSMark Brown xfer->speed_hz < master->min_speed_hz) 2181a2fd4f9fSMark Brown return -EINVAL; 2182f477b7fbSwangyuhang 2183f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 2184f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 2185f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 2186f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 2187f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 21881afd9989SGeert Uytterhoeven * 1. check the value matches one of single, dual and quad 21891afd9989SGeert Uytterhoeven * 2. check tx/rx_nbits match the mode in spi_device 2190f477b7fbSwangyuhang */ 2191db90a441SSourav Poddar if (xfer->tx_buf) { 2192f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2193f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 2194f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 2195a2fd4f9fSMark Brown return -EINVAL; 2196f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2197f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2198f477b7fbSwangyuhang return -EINVAL; 2199f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2200f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 2201f477b7fbSwangyuhang return -EINVAL; 2202db90a441SSourav Poddar } 2203f477b7fbSwangyuhang /* check transfer rx_nbits */ 2204db90a441SSourav Poddar if (xfer->rx_buf) { 2205f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2206f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 2207f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 2208f477b7fbSwangyuhang return -EINVAL; 2209f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2210f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2211f477b7fbSwangyuhang return -EINVAL; 2212f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2213f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 2214f477b7fbSwangyuhang return -EINVAL; 2215e6811d1dSLaxman Dewangan } 2216e6811d1dSLaxman Dewangan } 2217e6811d1dSLaxman Dewangan 2218cf32b71eSErnst Schwab message->status = -EINPROGRESS; 221990808738SMark Brown 222090808738SMark Brown return 0; 222190808738SMark Brown } 222290808738SMark Brown 222390808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 222490808738SMark Brown { 222590808738SMark Brown struct spi_master *master = spi->master; 222690808738SMark Brown 222790808738SMark Brown message->spi = spi; 222890808738SMark Brown 2229eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2230eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2231eca2ebc7SMartin Sperl 223290808738SMark Brown trace_spi_message_submit(message); 223390808738SMark Brown 2234cf32b71eSErnst Schwab return master->transfer(spi, message); 2235cf32b71eSErnst Schwab } 2236cf32b71eSErnst Schwab 2237568d0697SDavid Brownell /** 2238568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 2239568d0697SDavid Brownell * @spi: device with which data will be exchanged 2240568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 2241568d0697SDavid Brownell * Context: any 
(irqs may be blocked, etc) 2242568d0697SDavid Brownell * 2243568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 2244568d0697SDavid Brownell * as well as from task contexts which can sleep. 2245568d0697SDavid Brownell * 2246568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 2247568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 2248568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 2249568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 2250568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 2251568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 2252568d0697SDavid Brownell * core or controller driver code. 2253568d0697SDavid Brownell * 2254568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 2255568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 2256568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 2257568d0697SDavid Brownell * time requirements, for example. 2258568d0697SDavid Brownell * 2259568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 2260568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 2261568d0697SDavid Brownell * Until returning from the associated message completion callback, 2262568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 2263568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 2264568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 226597d56dc6SJavier Martinez Canillas * 226697d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
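 *
 * Illustrative sketch, not part of the original source: the my_dev
 * structure, its fields and my_xfer_complete() below are hypothetical,
 * but show how a protocol driver might queue a message and finish the
 * work from the (non-sleeping) completion callback:
 *
 *	static void my_xfer_complete(void *context)
 *	{
 *		struct my_dev *md = context;
 *
 *		if (md->msg.status)
 *			dev_err(&md->spi->dev, "read failed: %d\n",
 *				md->msg.status);
 *		complete(&md->done);
 *	}
 *
 *	static int my_dev_start_read(struct my_dev *md)
 *	{
 *		memset(&md->xfer, 0, sizeof(md->xfer));
 *		md->xfer.rx_buf = md->rx_buf;	/* DMA-safe buffer */
 *		md->xfer.len = sizeof(md->rx_buf);
 *
 *		spi_message_init_with_transfers(&md->msg, &md->xfer, 1);
 *		md->msg.complete = my_xfer_complete;
 *		md->msg.context = md;
 *
 *		return spi_async(md->spi, &md->msg);
 *	}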
2267568d0697SDavid Brownell */ 2268568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 2269568d0697SDavid Brownell { 2270568d0697SDavid Brownell struct spi_master *master = spi->master; 2271cf32b71eSErnst Schwab int ret; 2272cf32b71eSErnst Schwab unsigned long flags; 2273568d0697SDavid Brownell 227490808738SMark Brown ret = __spi_validate(spi, message); 227590808738SMark Brown if (ret != 0) 227690808738SMark Brown return ret; 227790808738SMark Brown 2278cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2279568d0697SDavid Brownell 2280cf32b71eSErnst Schwab if (master->bus_lock_flag) 2281cf32b71eSErnst Schwab ret = -EBUSY; 2282cf32b71eSErnst Schwab else 2283cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2284568d0697SDavid Brownell 2285cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2286cf32b71eSErnst Schwab 2287cf32b71eSErnst Schwab return ret; 2288568d0697SDavid Brownell } 2289568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 2290568d0697SDavid Brownell 2291cf32b71eSErnst Schwab /** 2292cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 2293cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2294cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 2295cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 2296cf32b71eSErnst Schwab * 2297cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 2298cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 2299cf32b71eSErnst Schwab * 2300cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 2301cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 2302cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 2303cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 2304cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 2305cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 2306cf32b71eSErnst Schwab * core or controller driver code. 2307cf32b71eSErnst Schwab * 2308cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 2309cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 2310cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 2311cf32b71eSErnst Schwab * time requirements, for example. 2312cf32b71eSErnst Schwab * 2313cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 2314cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 2315cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 2316cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 2317cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 2318cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 231997d56dc6SJavier Martinez Canillas * 232097d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
2321cf32b71eSErnst Schwab */ 2322cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2323cf32b71eSErnst Schwab { 2324cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2325cf32b71eSErnst Schwab int ret; 2326cf32b71eSErnst Schwab unsigned long flags; 2327cf32b71eSErnst Schwab 232890808738SMark Brown ret = __spi_validate(spi, message); 232990808738SMark Brown if (ret != 0) 233090808738SMark Brown return ret; 233190808738SMark Brown 2332cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2333cf32b71eSErnst Schwab 2334cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2335cf32b71eSErnst Schwab 2336cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2337cf32b71eSErnst Schwab 2338cf32b71eSErnst Schwab return ret; 2339cf32b71eSErnst Schwab 2340cf32b71eSErnst Schwab } 2341cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 2342cf32b71eSErnst Schwab 23437d077197SDavid Brownell 23447d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 23457d077197SDavid Brownell 23467d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on 23477d077197SDavid Brownell * top of the core. Some other utility methods are defined as 23487d077197SDavid Brownell * inline functions. 23497d077197SDavid Brownell */ 23507d077197SDavid Brownell 23515d870c8eSAndrew Morton static void spi_complete(void *arg) 23525d870c8eSAndrew Morton { 23535d870c8eSAndrew Morton complete(arg); 23545d870c8eSAndrew Morton } 23555d870c8eSAndrew Morton 2356cf32b71eSErnst Schwab static int __spi_sync(struct spi_device *spi, struct spi_message *message, 2357cf32b71eSErnst Schwab int bus_locked) 2358cf32b71eSErnst Schwab { 2359cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 2360cf32b71eSErnst Schwab int status; 2361cf32b71eSErnst Schwab struct spi_master *master = spi->master; 23620461a414SMark Brown unsigned long flags; 23630461a414SMark Brown 23640461a414SMark Brown status = __spi_validate(spi, message); 23650461a414SMark Brown if (status != 0) 23660461a414SMark Brown return status; 2367cf32b71eSErnst Schwab 2368cf32b71eSErnst Schwab message->complete = spi_complete; 2369cf32b71eSErnst Schwab message->context = &done; 23700461a414SMark Brown message->spi = spi; 2371cf32b71eSErnst Schwab 2372eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2373eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2374eca2ebc7SMartin Sperl 2375cf32b71eSErnst Schwab if (!bus_locked) 2376cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 2377cf32b71eSErnst Schwab 23780461a414SMark Brown /* If we're not using the legacy transfer method then we will 23790461a414SMark Brown * try to transfer in the calling context so special case. 23800461a414SMark Brown * This code would be less tricky if we could remove the 23810461a414SMark Brown * support for driver implemented message queues. 
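 *
 * For controllers using the core message queue this means the message
 * is queued under the bus_lock spinlock and, if the controller is idle,
 * pumped below in the calling context, so a simple spi_sync() does not
 * have to bounce through the message pump kthread.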
23820461a414SMark Brown */ 23830461a414SMark Brown if (master->transfer == spi_queued_transfer) { 23840461a414SMark Brown spin_lock_irqsave(&master->bus_lock_spinlock, flags); 23850461a414SMark Brown 23860461a414SMark Brown trace_spi_message_submit(message); 23870461a414SMark Brown 23880461a414SMark Brown status = __spi_queued_transfer(spi, message, false); 23890461a414SMark Brown 23900461a414SMark Brown spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 23910461a414SMark Brown } else { 2392cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 23930461a414SMark Brown } 2394cf32b71eSErnst Schwab 2395cf32b71eSErnst Schwab if (!bus_locked) 2396cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 2397cf32b71eSErnst Schwab 2398cf32b71eSErnst Schwab if (status == 0) { 23990461a414SMark Brown /* Push out the messages in the calling context if we 24000461a414SMark Brown * can. 24010461a414SMark Brown */ 2402eca2ebc7SMartin Sperl if (master->transfer == spi_queued_transfer) { 2403eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2404eca2ebc7SMartin Sperl spi_sync_immediate); 2405eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2406eca2ebc7SMartin Sperl spi_sync_immediate); 2407fc9e0f71SMark Brown __spi_pump_messages(master, false); 2408eca2ebc7SMartin Sperl } 24090461a414SMark Brown 2410cf32b71eSErnst Schwab wait_for_completion(&done); 2411cf32b71eSErnst Schwab status = message->status; 2412cf32b71eSErnst Schwab } 2413cf32b71eSErnst Schwab message->context = NULL; 2414cf32b71eSErnst Schwab return status; 2415cf32b71eSErnst Schwab } 2416cf32b71eSErnst Schwab 24178ae12a0dSDavid Brownell /** 24188ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 24198ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 24208ae12a0dSDavid Brownell * @message: describes the data transfers 242133e34dc6SDavid Brownell * Context: can sleep 24228ae12a0dSDavid Brownell * 24238ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 24248ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. Low-overhead controller 24258ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 24268ae12a0dSDavid Brownell * 24278ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 24288ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 24298ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 24308ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 24318ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 24328ae12a0dSDavid Brownell * 24330c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 24340c868461SDavid Brownell * message will not be freed before this call returns. 24350c868461SDavid Brownell * 243697d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
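 *
 * Illustrative sketch, not part of the original source (cmd/resp and
 * their lengths are hypothetical and assumed DMA-safe, e.g. kmalloc'd):
 * a typical command/response exchange built from two transfers:
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = cmd_len,  },
 *		{ .rx_buf = resp, .len = resp_len, },
 *	};
 *	struct spi_message m;
 *	int ret;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	ret = spi_sync(spi, &m);	/* blocks until the message completes */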
24378ae12a0dSDavid Brownell */ 24388ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 24398ae12a0dSDavid Brownell { 2440cf32b71eSErnst Schwab return __spi_sync(spi, message, 0); 24418ae12a0dSDavid Brownell } 24428ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 24438ae12a0dSDavid Brownell 2444cf32b71eSErnst Schwab /** 2445cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 2446cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2447cf32b71eSErnst Schwab * @message: describes the data transfers 2448cf32b71eSErnst Schwab * Context: can sleep 2449cf32b71eSErnst Schwab * 2450cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2451cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 2452cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 2453cf32b71eSErnst Schwab * 2454cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 245525985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2456cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 2457cf32b71eSErnst Schwab * 245897d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2459cf32b71eSErnst Schwab */ 2460cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2461cf32b71eSErnst Schwab { 2462cf32b71eSErnst Schwab return __spi_sync(spi, message, 1); 2463cf32b71eSErnst Schwab } 2464cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 2465cf32b71eSErnst Schwab 2466cf32b71eSErnst Schwab /** 2467cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2468cf32b71eSErnst Schwab * @master: SPI bus master that should be locked for exclusive bus access 2469cf32b71eSErnst Schwab * Context: can sleep 2470cf32b71eSErnst Schwab * 2471cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2472cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2473cf32b71eSErnst Schwab * 2474cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 2475cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2476cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 2477cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 2478cf32b71eSErnst Schwab * 247997d56dc6SJavier Martinez Canillas * Return: always zero. 
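 *
 * Illustrative sketch, not part of the original source (setup_msg and
 * data_msg are hypothetical, already-built spi_messages): a driver that
 * must issue two messages back to back with no other client interleaved
 * on the bus could bracket them like this:
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &setup_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->master);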
2480cf32b71eSErnst Schwab */ 2481cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master) 2482cf32b71eSErnst Schwab { 2483cf32b71eSErnst Schwab unsigned long flags; 2484cf32b71eSErnst Schwab 2485cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 2486cf32b71eSErnst Schwab 2487cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2488cf32b71eSErnst Schwab master->bus_lock_flag = 1; 2489cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2490cf32b71eSErnst Schwab 2491cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 2492cf32b71eSErnst Schwab 2493cf32b71eSErnst Schwab return 0; 2494cf32b71eSErnst Schwab } 2495cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 2496cf32b71eSErnst Schwab 2497cf32b71eSErnst Schwab /** 2498cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 2499cf32b71eSErnst Schwab * @master: SPI bus master that was locked for exclusive bus access 2500cf32b71eSErnst Schwab * Context: can sleep 2501cf32b71eSErnst Schwab * 2502cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2503cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2504cf32b71eSErnst Schwab * 2505cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2506cf32b71eSErnst Schwab * call. 2507cf32b71eSErnst Schwab * 250897d56dc6SJavier Martinez Canillas * Return: always zero. 2509cf32b71eSErnst Schwab */ 2510cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 2511cf32b71eSErnst Schwab { 2512cf32b71eSErnst Schwab master->bus_lock_flag = 0; 2513cf32b71eSErnst Schwab 2514cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 2515cf32b71eSErnst Schwab 2516cf32b71eSErnst Schwab return 0; 2517cf32b71eSErnst Schwab } 2518cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 2519cf32b71eSErnst Schwab 2520a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 2521a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 25228ae12a0dSDavid Brownell 25238ae12a0dSDavid Brownell static u8 *buf; 25248ae12a0dSDavid Brownell 25258ae12a0dSDavid Brownell /** 25268ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 25278ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 25288ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 25298ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 253027570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 253127570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 253233e34dc6SDavid Brownell * Context: can sleep 25338ae12a0dSDavid Brownell * 25348ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 25358ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 25368ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 2537b885244eSDavid Brownell * This call may only be used from a context that may sleep. 25388ae12a0dSDavid Brownell * 25390c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 254033e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 254133e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 25420c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 
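 *
 * Illustrative sketch, not part of the original source (the 0x05 opcode
 * is hypothetical): reading a couple of status bytes behind a one-byte
 * command is the typical use, and neither buffer has to be DMA-safe
 * since both are bounced through the preallocated buffer:
 *
 *	u8 cmd = 0x05;
 *	u8 status[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, status, sizeof(status));
 *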
254397d56dc6SJavier Martinez Canillas * 254497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 25458ae12a0dSDavid Brownell */ 25468ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 25470c4a1590SMark Brown const void *txbuf, unsigned n_tx, 25480c4a1590SMark Brown void *rxbuf, unsigned n_rx) 25498ae12a0dSDavid Brownell { 2550068f4070SDavid Brownell static DEFINE_MUTEX(lock); 25518ae12a0dSDavid Brownell 25528ae12a0dSDavid Brownell int status; 25538ae12a0dSDavid Brownell struct spi_message message; 2554bdff549eSDavid Brownell struct spi_transfer x[2]; 25558ae12a0dSDavid Brownell u8 *local_buf; 25568ae12a0dSDavid Brownell 2557b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 2558b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 2559b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 2560b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 25618ae12a0dSDavid Brownell */ 2562b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 25632cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 25642cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 2565b3a223eeSMark Brown if (!local_buf) 2566b3a223eeSMark Brown return -ENOMEM; 2567b3a223eeSMark Brown } else { 2568b3a223eeSMark Brown local_buf = buf; 2569b3a223eeSMark Brown } 25708ae12a0dSDavid Brownell 25718275c642SVitaly Wool spi_message_init(&message); 25725fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 2573bdff549eSDavid Brownell if (n_tx) { 2574bdff549eSDavid Brownell x[0].len = n_tx; 2575bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 2576bdff549eSDavid Brownell } 2577bdff549eSDavid Brownell if (n_rx) { 2578bdff549eSDavid Brownell x[1].len = n_rx; 2579bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 2580bdff549eSDavid Brownell } 25818275c642SVitaly Wool 25828ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 2583bdff549eSDavid Brownell x[0].tx_buf = local_buf; 2584bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 25858ae12a0dSDavid Brownell 25868ae12a0dSDavid Brownell /* do the i/o */ 25878ae12a0dSDavid Brownell status = spi_sync(spi, &message); 25889b938b74SMarc Pignat if (status == 0) 2589bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 25908ae12a0dSDavid Brownell 2591bdff549eSDavid Brownell if (x[0].tx_buf == buf) 2592068f4070SDavid Brownell mutex_unlock(&lock); 25938ae12a0dSDavid Brownell else 25948ae12a0dSDavid Brownell kfree(local_buf); 25958ae12a0dSDavid Brownell 25968ae12a0dSDavid Brownell return status; 25978ae12a0dSDavid Brownell } 25988ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 25998ae12a0dSDavid Brownell 26008ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 26018ae12a0dSDavid Brownell 2602ce79d54aSPantelis Antoniou #if IS_ENABLED(CONFIG_OF_DYNAMIC) 2603ce79d54aSPantelis Antoniou static int __spi_of_device_match(struct device *dev, void *data) 2604ce79d54aSPantelis Antoniou { 2605ce79d54aSPantelis Antoniou return dev->of_node == data; 2606ce79d54aSPantelis Antoniou } 2607ce79d54aSPantelis Antoniou 2608ce79d54aSPantelis Antoniou /* must call put_device() when done with returned spi_device device */ 2609ce79d54aSPantelis Antoniou static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 2610ce79d54aSPantelis Antoniou { 2611ce79d54aSPantelis Antoniou struct device *dev = 
bus_find_device(&spi_bus_type, NULL, node, 2612ce79d54aSPantelis Antoniou __spi_of_device_match); 2613ce79d54aSPantelis Antoniou return dev ? to_spi_device(dev) : NULL; 2614ce79d54aSPantelis Antoniou } 2615ce79d54aSPantelis Antoniou 2616ce79d54aSPantelis Antoniou static int __spi_of_master_match(struct device *dev, const void *data) 2617ce79d54aSPantelis Antoniou { 2618ce79d54aSPantelis Antoniou return dev->of_node == data; 2619ce79d54aSPantelis Antoniou } 2620ce79d54aSPantelis Antoniou 2621ce79d54aSPantelis Antoniou /* the spi masters are not using spi_bus, so we find it with another way */ 2622ce79d54aSPantelis Antoniou static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 2623ce79d54aSPantelis Antoniou { 2624ce79d54aSPantelis Antoniou struct device *dev; 2625ce79d54aSPantelis Antoniou 2626ce79d54aSPantelis Antoniou dev = class_find_device(&spi_master_class, NULL, node, 2627ce79d54aSPantelis Antoniou __spi_of_master_match); 2628ce79d54aSPantelis Antoniou if (!dev) 2629ce79d54aSPantelis Antoniou return NULL; 2630ce79d54aSPantelis Antoniou 2631ce79d54aSPantelis Antoniou /* reference got in class_find_device */ 2632ce79d54aSPantelis Antoniou return container_of(dev, struct spi_master, dev); 2633ce79d54aSPantelis Antoniou } 2634ce79d54aSPantelis Antoniou 2635ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action, 2636ce79d54aSPantelis Antoniou void *arg) 2637ce79d54aSPantelis Antoniou { 2638ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 2639ce79d54aSPantelis Antoniou struct spi_master *master; 2640ce79d54aSPantelis Antoniou struct spi_device *spi; 2641ce79d54aSPantelis Antoniou 2642ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 2643ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 2644ce79d54aSPantelis Antoniou master = of_find_spi_master_by_node(rd->dn->parent); 2645ce79d54aSPantelis Antoniou if (master == NULL) 2646ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 2647ce79d54aSPantelis Antoniou 2648ce79d54aSPantelis Antoniou spi = of_register_spi_device(master, rd->dn); 2649ce79d54aSPantelis Antoniou put_device(&master->dev); 2650ce79d54aSPantelis Antoniou 2651ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 2652ce79d54aSPantelis Antoniou pr_err("%s: failed to create for '%s'\n", 2653ce79d54aSPantelis Antoniou __func__, rd->dn->full_name); 2654ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 2655ce79d54aSPantelis Antoniou } 2656ce79d54aSPantelis Antoniou break; 2657ce79d54aSPantelis Antoniou 2658ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 2659ce79d54aSPantelis Antoniou /* find our device by node */ 2660ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 2661ce79d54aSPantelis Antoniou if (spi == NULL) 2662ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? 
not meant for us */ 2663ce79d54aSPantelis Antoniou 2664ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 2665ce79d54aSPantelis Antoniou spi_unregister_device(spi); 2666ce79d54aSPantelis Antoniou 2667ce79d54aSPantelis Antoniou /* and put the reference of the find */ 2668ce79d54aSPantelis Antoniou put_device(&spi->dev); 2669ce79d54aSPantelis Antoniou break; 2670ce79d54aSPantelis Antoniou } 2671ce79d54aSPantelis Antoniou 2672ce79d54aSPantelis Antoniou return NOTIFY_OK; 2673ce79d54aSPantelis Antoniou } 2674ce79d54aSPantelis Antoniou 2675ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 2676ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 2677ce79d54aSPantelis Antoniou }; 2678ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2679ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 2680ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2681ce79d54aSPantelis Antoniou 26828ae12a0dSDavid Brownell static int __init spi_init(void) 26838ae12a0dSDavid Brownell { 2684b885244eSDavid Brownell int status; 26858ae12a0dSDavid Brownell 2686e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 2687b885244eSDavid Brownell if (!buf) { 2688b885244eSDavid Brownell status = -ENOMEM; 2689b885244eSDavid Brownell goto err0; 26908ae12a0dSDavid Brownell } 2691b885244eSDavid Brownell 2692b885244eSDavid Brownell status = bus_register(&spi_bus_type); 2693b885244eSDavid Brownell if (status < 0) 2694b885244eSDavid Brownell goto err1; 2695b885244eSDavid Brownell 2696b885244eSDavid Brownell status = class_register(&spi_master_class); 2697b885244eSDavid Brownell if (status < 0) 2698b885244eSDavid Brownell goto err2; 2699ce79d54aSPantelis Antoniou 27005267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 2701ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 2702ce79d54aSPantelis Antoniou 2703b885244eSDavid Brownell return 0; 2704b885244eSDavid Brownell 2705b885244eSDavid Brownell err2: 2706b885244eSDavid Brownell bus_unregister(&spi_bus_type); 2707b885244eSDavid Brownell err1: 2708b885244eSDavid Brownell kfree(buf); 2709b885244eSDavid Brownell buf = NULL; 2710b885244eSDavid Brownell err0: 2711b885244eSDavid Brownell return status; 2712b885244eSDavid Brownell } 2713b885244eSDavid Brownell 27148ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 27158ae12a0dSDavid Brownell * but even essential drivers wait till later 2716b885244eSDavid Brownell * 2717b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 2718b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 2719b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 27208ae12a0dSDavid Brownell */ 2721673c0c00SDavid Brownell postcore_initcall(spi_init); 27228ae12a0dSDavid Brownell 2723
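/*
 * Illustrative sketch, not part of the original source: the arch_initcall()
 * ordering mentioned above matters because board code hands its statically
 * linked descriptions to the core before any master driver probes.  The
 * modalias, bus number and chip select below are hypothetical:
 *
 *	static struct spi_board_info my_board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "my-sensor",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init my_board_init(void)
 *	{
 *		return spi_register_board_info(my_board_spi_devices,
 *					       ARRAY_SIZE(my_board_spi_devices));
 *	}
 *	arch_initcall(my_board_init);
 */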