/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	/* Bucket by power of two, e.g. len 1 -> "0-1", len 3 -> "2-3",
	 * len 4096 -> "4096-8191"; clamped to "0-1" below and to
	 * "65536+" above.
	 */
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
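
/*
 * Example (illustration only, hypothetical driver names): a client driver
 * usually provides the spi_device_id table consumed by the matching code
 * above, and can recover per-variant data from it in probe():
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-sensor",      0 },
 *		{ "foo-sensor-rev2", 1 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 * In foo_probe(): variant = spi_get_device_id(spi)->driver_data;
 */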

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
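
/*
 * Example (illustration only, hypothetical driver names): clients normally
 * reach this function through the spi_register_driver() /
 * module_spi_driver() wrappers, e.g.:
 *
 *	static struct spi_driver foo_spi_driver = {
 *		.driver = {
 *			.name = "foo-sensor",
 *		},
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *		.id_table = foo_spi_ids,
 *	};
 *	module_spi_driver(foo_spi_driver);
 */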

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
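
/*
 * Example (illustration only, hypothetical values): the split between
 * spi_alloc_device() and spi_add_device() lets the caller fill in the
 * device before it becomes visible on the bus:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 10000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "foo-sensor", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */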

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
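
/*
 * Example (illustration only, hypothetical values): an adapter driver that
 * discovers a chip at runtime can describe it with a local spi_board_info
 * and hand it to spi_new_device():
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo-sensor",
 *		.max_speed_hz	= 5000000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *spi = spi_new_device(master, &chip);
 *
 *	if (!spi)
 *		dev_err(&master->dev, "cannot instantiate foo-sensor\n");
 */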

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
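
/*
 * Example (illustration only, hypothetical values): a board file typically
 * declares its hard-wired SPI devices in an __initdata table and registers
 * them during early init:
 *
 *	static struct spi_board_info foo_board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(foo_board_spi_devices,
 *				ARRAY_SIZE(foo_board_spi_devices));
 */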

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
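
/*
 * Example (illustration only, hypothetical driver names): a controller
 * driver relying on the default transfer_one_message() implements
 * transfer_one(), returns a positive value when the transfer completes
 * asynchronously, and reports completion from its interrupt handler:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(master, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_master *master = data;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */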

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags); 1154ffbbdd21SLinus Walleij 1155ef4d96ecSMark Brown mutex_lock(&master->io_mutex); 1156ef4d96ecSMark Brown 115749834de2SMark Brown if (!was_busy && master->auto_runtime_pm) { 115849834de2SMark Brown ret = pm_runtime_get_sync(master->dev.parent); 115949834de2SMark Brown if (ret < 0) { 116049834de2SMark Brown dev_err(&master->dev, "Failed to power device: %d\n", 116149834de2SMark Brown ret); 1162*764f2166SMark Brown mutex_unlock(&master->io_mutex); 116349834de2SMark Brown return; 116449834de2SMark Brown } 116549834de2SMark Brown } 116649834de2SMark Brown 116756ec1978SMark Brown if (!was_busy) 116856ec1978SMark Brown trace_spi_master_busy(master); 116956ec1978SMark Brown 11707dfd2bd7SShubhrajyoti D if (!was_busy && master->prepare_transfer_hardware) { 1171ffbbdd21SLinus Walleij ret = master->prepare_transfer_hardware(master); 1172ffbbdd21SLinus Walleij if (ret) { 1173ffbbdd21SLinus Walleij dev_err(&master->dev, 1174ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 117549834de2SMark Brown 117649834de2SMark Brown if (master->auto_runtime_pm) 117749834de2SMark Brown pm_runtime_put(master->dev.parent); 1178*764f2166SMark Brown mutex_unlock(&master->io_mutex); 1179ffbbdd21SLinus Walleij return; 1180ffbbdd21SLinus Walleij } 1181ffbbdd21SLinus Walleij } 1182ffbbdd21SLinus Walleij 118356ec1978SMark Brown trace_spi_message_start(master->cur_msg); 118456ec1978SMark Brown 11852841a5fcSMark Brown if (master->prepare_message) { 11862841a5fcSMark Brown ret = master->prepare_message(master, master->cur_msg); 11872841a5fcSMark Brown if (ret) { 11882841a5fcSMark Brown dev_err(&master->dev, 11892841a5fcSMark Brown "failed to prepare message: %d\n", ret); 11902841a5fcSMark Brown master->cur_msg->status = ret; 11912841a5fcSMark Brown spi_finalize_current_message(master); 119249023d2eSJon Hunter goto out; 11932841a5fcSMark Brown } 11942841a5fcSMark Brown master->cur_msg_prepared = true; 11952841a5fcSMark Brown } 11962841a5fcSMark Brown 119799adef31SMark Brown ret = spi_map_msg(master, master->cur_msg); 119899adef31SMark Brown if (ret) { 119999adef31SMark Brown master->cur_msg->status = ret; 120099adef31SMark Brown spi_finalize_current_message(master); 120149023d2eSJon Hunter goto out; 120299adef31SMark Brown } 120399adef31SMark Brown 1204ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 1205ffbbdd21SLinus Walleij if (ret) { 1206ffbbdd21SLinus Walleij dev_err(&master->dev, 12071f802f82SGeert Uytterhoeven "failed to transfer one message from queue\n"); 120849023d2eSJon Hunter goto out; 1209ffbbdd21SLinus Walleij } 121049023d2eSJon Hunter 121149023d2eSJon Hunter out: 1212ef4d96ecSMark Brown mutex_unlock(&master->io_mutex); 121362826970SMark Brown 121462826970SMark Brown /* Prod the scheduler in case transfer_one() was busy waiting */ 121549023d2eSJon Hunter if (!ret) 121662826970SMark Brown cond_resched(); 1217ffbbdd21SLinus Walleij } 1218ffbbdd21SLinus Walleij 1219fc9e0f71SMark Brown /** 1220fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 1221fc9e0f71SMark Brown * @work: pointer to kthread work struct contained in the master struct 1222fc9e0f71SMark Brown */ 1223fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1224fc9e0f71SMark Brown { 1225fc9e0f71SMark Brown struct spi_master *master = 1226fc9e0f71SMark Brown container_of(work, struct spi_master, pump_messages); 1227fc9e0f71SMark Brown 1228ef4d96ecSMark Brown __spi_pump_messages(master, 
true); 1229fc9e0f71SMark Brown } 1230fc9e0f71SMark Brown 1231ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master) 1232ffbbdd21SLinus Walleij { 1233ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1234ffbbdd21SLinus Walleij 1235ffbbdd21SLinus Walleij master->running = false; 1236ffbbdd21SLinus Walleij master->busy = false; 1237ffbbdd21SLinus Walleij 1238ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 1239ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 1240f170168bSKees Cook &master->kworker, "%s", 1241ffbbdd21SLinus Walleij dev_name(&master->dev)); 1242ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 1243ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 124498a8f5a0SJarkko Nikula return PTR_ERR(master->kworker_task); 1245ffbbdd21SLinus Walleij } 1246ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 1247ffbbdd21SLinus Walleij 1248ffbbdd21SLinus Walleij /* 1249ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 1250ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1251ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1252ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 1253ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 1254ffbbdd21SLinus Walleij */ 1255ffbbdd21SLinus Walleij if (master->rt) { 1256ffbbdd21SLinus Walleij dev_info(&master->dev, 1257ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 1258ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1259ffbbdd21SLinus Walleij } 1260ffbbdd21SLinus Walleij 1261ffbbdd21SLinus Walleij return 0; 1262ffbbdd21SLinus Walleij } 1263ffbbdd21SLinus Walleij 1264ffbbdd21SLinus Walleij /** 1265ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1266ffbbdd21SLinus Walleij * messages 1267ffbbdd21SLinus Walleij * @master: the master to check for queued messages 1268ffbbdd21SLinus Walleij * 1269ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1270ffbbdd21SLinus Walleij * this call. 127197d56dc6SJavier Martinez Canillas * 127297d56dc6SJavier Martinez Canillas * Return: the next message in the queue, else NULL if the queue is empty. 
1273ffbbdd21SLinus Walleij */ 1274ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1275ffbbdd21SLinus Walleij { 1276ffbbdd21SLinus Walleij struct spi_message *next; 1277ffbbdd21SLinus Walleij unsigned long flags; 1278ffbbdd21SLinus Walleij 1279ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 1280ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 12811cfd97f9SAxel Lin next = list_first_entry_or_null(&master->queue, struct spi_message, 12821cfd97f9SAxel Lin queue); 1283ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1284ffbbdd21SLinus Walleij 1285ffbbdd21SLinus Walleij return next; 1286ffbbdd21SLinus Walleij } 1287ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1288ffbbdd21SLinus Walleij 1289ffbbdd21SLinus Walleij /** 1290ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 1291ffbbdd21SLinus Walleij * @master: the master to return the message to 1292ffbbdd21SLinus Walleij * 1293ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 1294ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue. 1295ffbbdd21SLinus Walleij */ 1296ffbbdd21SLinus Walleij void spi_finalize_current_message(struct spi_master *master) 1297ffbbdd21SLinus Walleij { 1298ffbbdd21SLinus Walleij struct spi_message *mesg; 1299ffbbdd21SLinus Walleij unsigned long flags; 13002841a5fcSMark Brown int ret; 1301ffbbdd21SLinus Walleij 1302ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1303ffbbdd21SLinus Walleij mesg = master->cur_msg; 1304ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1305ffbbdd21SLinus Walleij 130699adef31SMark Brown spi_unmap_msg(master, mesg); 130799adef31SMark Brown 13082841a5fcSMark Brown if (master->cur_msg_prepared && master->unprepare_message) { 13092841a5fcSMark Brown ret = master->unprepare_message(master, mesg); 13102841a5fcSMark Brown if (ret) { 13112841a5fcSMark Brown dev_err(&master->dev, 13122841a5fcSMark Brown "failed to unprepare message: %d\n", ret); 13132841a5fcSMark Brown } 13142841a5fcSMark Brown } 1315391949b6SUwe Kleine-König 13168e76ef88SMartin Sperl spin_lock_irqsave(&master->queue_lock, flags); 13178e76ef88SMartin Sperl master->cur_msg = NULL; 13182841a5fcSMark Brown master->cur_msg_prepared = false; 13198e76ef88SMartin Sperl queue_kthread_work(&master->kworker, &master->pump_messages); 13208e76ef88SMartin Sperl spin_unlock_irqrestore(&master->queue_lock, flags); 13218e76ef88SMartin Sperl 13228e76ef88SMartin Sperl trace_spi_message_done(mesg); 13232841a5fcSMark Brown 1324ffbbdd21SLinus Walleij mesg->state = NULL; 1325ffbbdd21SLinus Walleij if (mesg->complete) 1326ffbbdd21SLinus Walleij mesg->complete(mesg->context); 1327ffbbdd21SLinus Walleij } 1328ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1329ffbbdd21SLinus Walleij 1330ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master) 1331ffbbdd21SLinus Walleij { 1332ffbbdd21SLinus Walleij unsigned long flags; 1333ffbbdd21SLinus Walleij 1334ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1335ffbbdd21SLinus Walleij 1336ffbbdd21SLinus Walleij if (master->running || master->busy) { 1337ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1338ffbbdd21SLinus Walleij return -EBUSY; 1339ffbbdd21SLinus Walleij } 1340ffbbdd21SLinus Walleij 1341ffbbdd21SLinus Walleij 
master->running = true; 1342ffbbdd21SLinus Walleij master->cur_msg = NULL; 1343ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1344ffbbdd21SLinus Walleij 1345ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1346ffbbdd21SLinus Walleij 1347ffbbdd21SLinus Walleij return 0; 1348ffbbdd21SLinus Walleij } 1349ffbbdd21SLinus Walleij 1350ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 1351ffbbdd21SLinus Walleij { 1352ffbbdd21SLinus Walleij unsigned long flags; 1353ffbbdd21SLinus Walleij unsigned limit = 500; 1354ffbbdd21SLinus Walleij int ret = 0; 1355ffbbdd21SLinus Walleij 1356ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1357ffbbdd21SLinus Walleij 1358ffbbdd21SLinus Walleij /* 1359ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 1360ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 1361ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1362ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 1363ffbbdd21SLinus Walleij */ 1364ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 1365ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1366f97b26b0SAxel Lin usleep_range(10000, 11000); 1367ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1368ffbbdd21SLinus Walleij } 1369ffbbdd21SLinus Walleij 1370ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 1371ffbbdd21SLinus Walleij ret = -EBUSY; 1372ffbbdd21SLinus Walleij else 1373ffbbdd21SLinus Walleij master->running = false; 1374ffbbdd21SLinus Walleij 1375ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1376ffbbdd21SLinus Walleij 1377ffbbdd21SLinus Walleij if (ret) { 1378ffbbdd21SLinus Walleij dev_warn(&master->dev, 1379ffbbdd21SLinus Walleij "could not stop message queue\n"); 1380ffbbdd21SLinus Walleij return ret; 1381ffbbdd21SLinus Walleij } 1382ffbbdd21SLinus Walleij return ret; 1383ffbbdd21SLinus Walleij } 1384ffbbdd21SLinus Walleij 1385ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 1386ffbbdd21SLinus Walleij { 1387ffbbdd21SLinus Walleij int ret; 1388ffbbdd21SLinus Walleij 1389ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1390ffbbdd21SLinus Walleij 1391ffbbdd21SLinus Walleij /* 1392ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 1393ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 1394ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 1395ffbbdd21SLinus Walleij * return anyway. 
1396ffbbdd21SLinus Walleij */ 1397ffbbdd21SLinus Walleij if (ret) { 1398ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 1399ffbbdd21SLinus Walleij return ret; 1400ffbbdd21SLinus Walleij } 1401ffbbdd21SLinus Walleij 1402ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 1403ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 1404ffbbdd21SLinus Walleij 1405ffbbdd21SLinus Walleij return 0; 1406ffbbdd21SLinus Walleij } 1407ffbbdd21SLinus Walleij 14080461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi, 14090461a414SMark Brown struct spi_message *msg, 14100461a414SMark Brown bool need_pump) 1411ffbbdd21SLinus Walleij { 1412ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 1413ffbbdd21SLinus Walleij unsigned long flags; 1414ffbbdd21SLinus Walleij 1415ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1416ffbbdd21SLinus Walleij 1417ffbbdd21SLinus Walleij if (!master->running) { 1418ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1419ffbbdd21SLinus Walleij return -ESHUTDOWN; 1420ffbbdd21SLinus Walleij } 1421ffbbdd21SLinus Walleij msg->actual_length = 0; 1422ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 1423ffbbdd21SLinus Walleij 1424ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 14250461a414SMark Brown if (!master->busy && need_pump) 1426ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1427ffbbdd21SLinus Walleij 1428ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1429ffbbdd21SLinus Walleij return 0; 1430ffbbdd21SLinus Walleij } 1431ffbbdd21SLinus Walleij 14320461a414SMark Brown /** 14330461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers 14340461a414SMark Brown * @spi: spi device which is requesting transfer 14350461a414SMark Brown * @msg: spi message which is to handled is queued to driver queue 143697d56dc6SJavier Martinez Canillas * 143797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
14380461a414SMark Brown */ 14390461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 14400461a414SMark Brown { 14410461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 14420461a414SMark Brown } 14430461a414SMark Brown 1444ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 1445ffbbdd21SLinus Walleij { 1446ffbbdd21SLinus Walleij int ret; 1447ffbbdd21SLinus Walleij 1448ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 1449b158935fSMark Brown if (!master->transfer_one_message) 1450b158935fSMark Brown master->transfer_one_message = spi_transfer_one_message; 1451ffbbdd21SLinus Walleij 1452ffbbdd21SLinus Walleij /* Initialize and start queue */ 1453ffbbdd21SLinus Walleij ret = spi_init_queue(master); 1454ffbbdd21SLinus Walleij if (ret) { 1455ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 1456ffbbdd21SLinus Walleij goto err_init_queue; 1457ffbbdd21SLinus Walleij } 1458c3676d5cSMark Brown master->queued = true; 1459ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1460ffbbdd21SLinus Walleij if (ret) { 1461ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 1462ffbbdd21SLinus Walleij goto err_start_queue; 1463ffbbdd21SLinus Walleij } 1464ffbbdd21SLinus Walleij 1465ffbbdd21SLinus Walleij return 0; 1466ffbbdd21SLinus Walleij 1467ffbbdd21SLinus Walleij err_start_queue: 1468ffbbdd21SLinus Walleij spi_destroy_queue(master); 1469c3676d5cSMark Brown err_init_queue: 1470ffbbdd21SLinus Walleij return ret; 1471ffbbdd21SLinus Walleij } 1472ffbbdd21SLinus Walleij 1473ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1474ffbbdd21SLinus Walleij 14757cb94361SAndreas Larsson #if defined(CONFIG_OF) 1476aff5e3f8SPantelis Antoniou static struct spi_device * 1477aff5e3f8SPantelis Antoniou of_register_spi_device(struct spi_master *master, struct device_node *nc) 1478d57a4282SGrant Likely { 1479d57a4282SGrant Likely struct spi_device *spi; 1480d57a4282SGrant Likely int rc; 148189da4293STrent Piepho u32 value; 1482d57a4282SGrant Likely 1483d57a4282SGrant Likely /* Alloc an spi_device */ 1484d57a4282SGrant Likely spi = spi_alloc_device(master); 1485d57a4282SGrant Likely if (!spi) { 1486d57a4282SGrant Likely dev_err(&master->dev, "spi_device alloc error for %s\n", 1487d57a4282SGrant Likely nc->full_name); 1488aff5e3f8SPantelis Antoniou rc = -ENOMEM; 1489aff5e3f8SPantelis Antoniou goto err_out; 1490d57a4282SGrant Likely } 1491d57a4282SGrant Likely 1492d57a4282SGrant Likely /* Select device driver */ 1493aff5e3f8SPantelis Antoniou rc = of_modalias_node(nc, spi->modalias, 1494aff5e3f8SPantelis Antoniou sizeof(spi->modalias)); 1495aff5e3f8SPantelis Antoniou if (rc < 0) { 1496d57a4282SGrant Likely dev_err(&master->dev, "cannot find modalias for %s\n", 1497d57a4282SGrant Likely nc->full_name); 1498aff5e3f8SPantelis Antoniou goto err_out; 1499d57a4282SGrant Likely } 1500d57a4282SGrant Likely 1501d57a4282SGrant Likely /* Device address */ 150289da4293STrent Piepho rc = of_property_read_u32(nc, "reg", &value); 150389da4293STrent Piepho if (rc) { 150489da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 150589da4293STrent Piepho nc->full_name, rc); 1506aff5e3f8SPantelis Antoniou goto err_out; 1507d57a4282SGrant Likely } 150889da4293STrent Piepho spi->chip_select = value; 1509d57a4282SGrant Likely 1510d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) 
*/ 1511d57a4282SGrant Likely if (of_find_property(nc, "spi-cpha", NULL)) 1512d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1513d57a4282SGrant Likely if (of_find_property(nc, "spi-cpol", NULL)) 1514d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1515d57a4282SGrant Likely if (of_find_property(nc, "spi-cs-high", NULL)) 1516d57a4282SGrant Likely spi->mode |= SPI_CS_HIGH; 1517c20151dfSLars-Peter Clausen if (of_find_property(nc, "spi-3wire", NULL)) 1518c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1519cd6339e6SZhao Qiang if (of_find_property(nc, "spi-lsb-first", NULL)) 1520cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 1521d57a4282SGrant Likely 1522f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 152389da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 152489da4293STrent Piepho switch (value) { 152589da4293STrent Piepho case 1: 1526f477b7fbSwangyuhang break; 152789da4293STrent Piepho case 2: 1528f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1529f477b7fbSwangyuhang break; 153089da4293STrent Piepho case 4: 1531f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1532f477b7fbSwangyuhang break; 1533f477b7fbSwangyuhang default: 153480874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1535a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 153689da4293STrent Piepho value); 153780874d8cSGeert Uytterhoeven break; 1538f477b7fbSwangyuhang } 1539a822e99cSMark Brown } 1540f477b7fbSwangyuhang 154189da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 154289da4293STrent Piepho switch (value) { 154389da4293STrent Piepho case 1: 1544f477b7fbSwangyuhang break; 154589da4293STrent Piepho case 2: 1546f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1547f477b7fbSwangyuhang break; 154889da4293STrent Piepho case 4: 1549f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1550f477b7fbSwangyuhang break; 1551f477b7fbSwangyuhang default: 155280874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1553a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 155489da4293STrent Piepho value); 155580874d8cSGeert Uytterhoeven break; 1556f477b7fbSwangyuhang } 1557a822e99cSMark Brown } 1558f477b7fbSwangyuhang 1559d57a4282SGrant Likely /* Device speed */ 156089da4293STrent Piepho rc = of_property_read_u32(nc, "spi-max-frequency", &value); 156189da4293STrent Piepho if (rc) { 156289da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 156389da4293STrent Piepho nc->full_name, rc); 1564aff5e3f8SPantelis Antoniou goto err_out; 1565d57a4282SGrant Likely } 156689da4293STrent Piepho spi->max_speed_hz = value; 1567d57a4282SGrant Likely 1568d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 1569d57a4282SGrant Likely of_node_get(nc); 1570d57a4282SGrant Likely spi->dev.of_node = nc; 1571d57a4282SGrant Likely 1572d57a4282SGrant Likely /* Register the new device */ 1573d57a4282SGrant Likely rc = spi_add_device(spi); 1574d57a4282SGrant Likely if (rc) { 1575d57a4282SGrant Likely dev_err(&master->dev, "spi_device register error %s\n", 1576d57a4282SGrant Likely nc->full_name); 1577aff5e3f8SPantelis Antoniou goto err_out; 1578d57a4282SGrant Likely } 1579d57a4282SGrant Likely 1580aff5e3f8SPantelis Antoniou return spi; 1581aff5e3f8SPantelis Antoniou 1582aff5e3f8SPantelis Antoniou err_out: 1583aff5e3f8SPantelis Antoniou spi_dev_put(spi); 1584aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 1585aff5e3f8SPantelis Antoniou } 1586aff5e3f8SPantelis Antoniou 1587aff5e3f8SPantelis Antoniou /** 1588aff5e3f8SPantelis Antoniou * 
of_register_spi_devices() - Register child devices onto the SPI bus 1589aff5e3f8SPantelis Antoniou * @master: Pointer to spi_master device 1590aff5e3f8SPantelis Antoniou * 1591aff5e3f8SPantelis Antoniou * Registers an spi_device for each child node of master node which has a 'reg' 1592aff5e3f8SPantelis Antoniou * property. 1593aff5e3f8SPantelis Antoniou */ 1594aff5e3f8SPantelis Antoniou static void of_register_spi_devices(struct spi_master *master) 1595aff5e3f8SPantelis Antoniou { 1596aff5e3f8SPantelis Antoniou struct spi_device *spi; 1597aff5e3f8SPantelis Antoniou struct device_node *nc; 1598aff5e3f8SPantelis Antoniou 1599aff5e3f8SPantelis Antoniou if (!master->dev.of_node) 1600aff5e3f8SPantelis Antoniou return; 1601aff5e3f8SPantelis Antoniou 1602aff5e3f8SPantelis Antoniou for_each_available_child_of_node(master->dev.of_node, nc) { 1603bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1604bd6c1644SGeert Uytterhoeven continue; 1605aff5e3f8SPantelis Antoniou spi = of_register_spi_device(master, nc); 1606aff5e3f8SPantelis Antoniou if (IS_ERR(spi)) 1607aff5e3f8SPantelis Antoniou dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1608aff5e3f8SPantelis Antoniou nc->full_name); 1609d57a4282SGrant Likely } 1610d57a4282SGrant Likely } 1611d57a4282SGrant Likely #else 1612d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) { } 1613d57a4282SGrant Likely #endif 1614d57a4282SGrant Likely 161564bee4d2SMika Westerberg #ifdef CONFIG_ACPI 161664bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 161764bee4d2SMika Westerberg { 161864bee4d2SMika Westerberg struct spi_device *spi = data; 1619a0a90718SMika Westerberg struct spi_master *master = spi->master; 162064bee4d2SMika Westerberg 162164bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 162264bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 162364bee4d2SMika Westerberg 162464bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 162564bee4d2SMika Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1626a0a90718SMika Westerberg /* 1627a0a90718SMika Westerberg * ACPI DeviceSelection numbering is handled by the 1628a0a90718SMika Westerberg * host controller driver in Windows and can vary 1629a0a90718SMika Westerberg * from driver to driver. In Linux we always expect 1630a0a90718SMika Westerberg * 0 .. max - 1 so we need to ask the driver to 1631a0a90718SMika Westerberg * translate between the two schemes. 
1632a0a90718SMika Westerberg */ 1633a0a90718SMika Westerberg if (master->fw_translate_cs) { 1634a0a90718SMika Westerberg int cs = master->fw_translate_cs(master, 1635a0a90718SMika Westerberg sb->device_selection); 1636a0a90718SMika Westerberg if (cs < 0) 1637a0a90718SMika Westerberg return cs; 1638a0a90718SMika Westerberg spi->chip_select = cs; 1639a0a90718SMika Westerberg } else { 164064bee4d2SMika Westerberg spi->chip_select = sb->device_selection; 1641a0a90718SMika Westerberg } 1642a0a90718SMika Westerberg 164364bee4d2SMika Westerberg spi->max_speed_hz = sb->connection_speed; 164464bee4d2SMika Westerberg 164564bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 164664bee4d2SMika Westerberg spi->mode |= SPI_CPHA; 164764bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 164864bee4d2SMika Westerberg spi->mode |= SPI_CPOL; 164964bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 165064bee4d2SMika Westerberg spi->mode |= SPI_CS_HIGH; 165164bee4d2SMika Westerberg } 165264bee4d2SMika Westerberg } else if (spi->irq < 0) { 165364bee4d2SMika Westerberg struct resource r; 165464bee4d2SMika Westerberg 165564bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 165664bee4d2SMika Westerberg spi->irq = r.start; 165764bee4d2SMika Westerberg } 165864bee4d2SMika Westerberg 165964bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 166064bee4d2SMika Westerberg return 1; 166164bee4d2SMika Westerberg } 166264bee4d2SMika Westerberg 16637f24467fSOctavian Purdila static acpi_status acpi_register_spi_device(struct spi_master *master, 16647f24467fSOctavian Purdila struct acpi_device *adev) 166564bee4d2SMika Westerberg { 166664bee4d2SMika Westerberg struct list_head resource_list; 166764bee4d2SMika Westerberg struct spi_device *spi; 166864bee4d2SMika Westerberg int ret; 166964bee4d2SMika Westerberg 16707f24467fSOctavian Purdila if (acpi_bus_get_status(adev) || !adev->status.present || 16717f24467fSOctavian Purdila acpi_device_enumerated(adev)) 167264bee4d2SMika Westerberg return AE_OK; 167364bee4d2SMika Westerberg 167464bee4d2SMika Westerberg spi = spi_alloc_device(master); 167564bee4d2SMika Westerberg if (!spi) { 167664bee4d2SMika Westerberg dev_err(&master->dev, "failed to allocate SPI device for %s\n", 167764bee4d2SMika Westerberg dev_name(&adev->dev)); 167864bee4d2SMika Westerberg return AE_NO_MEMORY; 167964bee4d2SMika Westerberg } 168064bee4d2SMika Westerberg 16817b199811SRafael J. 
Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 168264bee4d2SMika Westerberg spi->irq = -1; 168364bee4d2SMika Westerberg 168464bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 168564bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 168664bee4d2SMika Westerberg acpi_spi_add_resource, spi); 168764bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 168864bee4d2SMika Westerberg 168964bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 169064bee4d2SMika Westerberg spi_dev_put(spi); 169164bee4d2SMika Westerberg return AE_OK; 169264bee4d2SMika Westerberg } 169364bee4d2SMika Westerberg 169433ada67dSChristophe RICARD if (spi->irq < 0) 169533ada67dSChristophe RICARD spi->irq = acpi_dev_gpio_irq_get(adev, 0); 169633ada67dSChristophe RICARD 16977f24467fSOctavian Purdila acpi_device_set_enumerated(adev); 16987f24467fSOctavian Purdila 169933cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 1700cf9eb39cSJarkko Nikula strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 170164bee4d2SMika Westerberg if (spi_add_device(spi)) { 170233cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 170364bee4d2SMika Westerberg dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 170464bee4d2SMika Westerberg dev_name(&adev->dev)); 170564bee4d2SMika Westerberg spi_dev_put(spi); 170664bee4d2SMika Westerberg } 170764bee4d2SMika Westerberg 170864bee4d2SMika Westerberg return AE_OK; 170964bee4d2SMika Westerberg } 171064bee4d2SMika Westerberg 17117f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 17127f24467fSOctavian Purdila void *data, void **return_value) 17137f24467fSOctavian Purdila { 17147f24467fSOctavian Purdila struct spi_master *master = data; 17157f24467fSOctavian Purdila struct acpi_device *adev; 17167f24467fSOctavian Purdila 17177f24467fSOctavian Purdila if (acpi_bus_get_device(handle, &adev)) 17187f24467fSOctavian Purdila return AE_OK; 17197f24467fSOctavian Purdila 17207f24467fSOctavian Purdila return acpi_register_spi_device(master, adev); 17217f24467fSOctavian Purdila } 17227f24467fSOctavian Purdila 172364bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master) 172464bee4d2SMika Westerberg { 172564bee4d2SMika Westerberg acpi_status status; 172664bee4d2SMika Westerberg acpi_handle handle; 172764bee4d2SMika Westerberg 172829896178SRafael J. 
Wysocki handle = ACPI_HANDLE(master->dev.parent); 172964bee4d2SMika Westerberg if (!handle) 173064bee4d2SMika Westerberg return; 173164bee4d2SMika Westerberg 173264bee4d2SMika Westerberg status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 173364bee4d2SMika Westerberg acpi_spi_add_device, NULL, 173464bee4d2SMika Westerberg master, NULL); 173564bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 173664bee4d2SMika Westerberg dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 173764bee4d2SMika Westerberg } 173864bee4d2SMika Westerberg #else 173964bee4d2SMika Westerberg static inline void acpi_register_spi_devices(struct spi_master *master) {} 174064bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 174164bee4d2SMika Westerberg 174249dce689STony Jones static void spi_master_release(struct device *dev) 17438ae12a0dSDavid Brownell { 17448ae12a0dSDavid Brownell struct spi_master *master; 17458ae12a0dSDavid Brownell 174649dce689STony Jones master = container_of(dev, struct spi_master, dev); 17478ae12a0dSDavid Brownell kfree(master); 17488ae12a0dSDavid Brownell } 17498ae12a0dSDavid Brownell 17508ae12a0dSDavid Brownell static struct class spi_master_class = { 17518ae12a0dSDavid Brownell .name = "spi_master", 17528ae12a0dSDavid Brownell .owner = THIS_MODULE, 175349dce689STony Jones .dev_release = spi_master_release, 1754eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 17558ae12a0dSDavid Brownell }; 17568ae12a0dSDavid Brownell 17578ae12a0dSDavid Brownell 17588ae12a0dSDavid Brownell /** 17598ae12a0dSDavid Brownell * spi_alloc_master - allocate SPI master controller 17608ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 176133e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 176249dce689STony Jones * memory is in the driver_data field of the returned device, 17630c868461SDavid Brownell * accessible with spi_master_get_devdata(). 176433e34dc6SDavid Brownell * Context: can sleep 17658ae12a0dSDavid Brownell * 17668ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 17678ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 1768ba1a0513Sdmitry pervushin * an spi_master structure, prior to calling spi_register_master(). 17698ae12a0dSDavid Brownell * 177097d56dc6SJavier Martinez Canillas * This must be called from context that can sleep. 17718ae12a0dSDavid Brownell * 17728ae12a0dSDavid Brownell * The caller is responsible for assigning the bus number and initializing 1773ba1a0513Sdmitry pervushin * the master's methods before calling spi_register_master(); and (after errors 1774a394d635SGuenter Roeck * adding the device) calling spi_master_put() to prevent a memory leak. 177597d56dc6SJavier Martinez Canillas * 177697d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 
17778ae12a0dSDavid Brownell */ 1778e9d5a461SAdrian Bunk struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 17798ae12a0dSDavid Brownell { 17808ae12a0dSDavid Brownell struct spi_master *master; 17818ae12a0dSDavid Brownell 17820c868461SDavid Brownell if (!dev) 17830c868461SDavid Brownell return NULL; 17840c868461SDavid Brownell 17855fe5f05eSJingoo Han master = kzalloc(size + sizeof(*master), GFP_KERNEL); 17868ae12a0dSDavid Brownell if (!master) 17878ae12a0dSDavid Brownell return NULL; 17888ae12a0dSDavid Brownell 178949dce689STony Jones device_initialize(&master->dev); 17901e8a52e1SGrant Likely master->bus_num = -1; 17911e8a52e1SGrant Likely master->num_chipselect = 1; 179249dce689STony Jones master->dev.class = &spi_master_class; 1793157f38f9SJohan Hovold master->dev.parent = dev; 1794d7e2ee25SLinus Walleij pm_suspend_ignore_children(&master->dev, true); 17950c868461SDavid Brownell spi_master_set_devdata(master, &master[1]); 17968ae12a0dSDavid Brownell 17978ae12a0dSDavid Brownell return master; 17988ae12a0dSDavid Brownell } 17998ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_alloc_master); 18008ae12a0dSDavid Brownell 180174317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 180274317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 180374317984SJean-Christophe PLAGNIOL-VILLARD { 1804e80beb27SGrant Likely int nb, i, *cs; 180574317984SJean-Christophe PLAGNIOL-VILLARD struct device_node *np = master->dev.of_node; 180674317984SJean-Christophe PLAGNIOL-VILLARD 180774317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 180874317984SJean-Christophe PLAGNIOL-VILLARD return 0; 180974317984SJean-Christophe PLAGNIOL-VILLARD 181074317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 18115fe5f05eSJingoo Han master->num_chipselect = max_t(int, nb, master->num_chipselect); 181274317984SJean-Christophe PLAGNIOL-VILLARD 18138ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 18148ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 181574317984SJean-Christophe PLAGNIOL-VILLARD return 0; 18168ec5d84eSAndreas Larsson else if (nb < 0) 18178ec5d84eSAndreas Larsson return nb; 181874317984SJean-Christophe PLAGNIOL-VILLARD 181974317984SJean-Christophe PLAGNIOL-VILLARD cs = devm_kzalloc(&master->dev, 182074317984SJean-Christophe PLAGNIOL-VILLARD sizeof(int) * master->num_chipselect, 182174317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 182274317984SJean-Christophe PLAGNIOL-VILLARD master->cs_gpios = cs; 182374317984SJean-Christophe PLAGNIOL-VILLARD 182474317984SJean-Christophe PLAGNIOL-VILLARD if (!master->cs_gpios) 182574317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 182674317984SJean-Christophe PLAGNIOL-VILLARD 18270da83bb1SAndreas Larsson for (i = 0; i < master->num_chipselect; i++) 1828446411e1SAndreas Larsson cs[i] = -ENOENT; 182974317984SJean-Christophe PLAGNIOL-VILLARD 183074317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 183174317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = of_get_named_gpio(np, "cs-gpios", i); 183274317984SJean-Christophe PLAGNIOL-VILLARD 183374317984SJean-Christophe PLAGNIOL-VILLARD return 0; 183474317984SJean-Christophe PLAGNIOL-VILLARD } 183574317984SJean-Christophe PLAGNIOL-VILLARD #else 183674317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 183774317984SJean-Christophe PLAGNIOL-VILLARD { 183874317984SJean-Christophe PLAGNIOL-VILLARD return 0; 
183974317984SJean-Christophe PLAGNIOL-VILLARD } 184074317984SJean-Christophe PLAGNIOL-VILLARD #endif 184174317984SJean-Christophe PLAGNIOL-VILLARD 18428ae12a0dSDavid Brownell /** 18438ae12a0dSDavid Brownell * spi_register_master - register SPI master controller 18448ae12a0dSDavid Brownell * @master: initialized master, originally from spi_alloc_master() 184533e34dc6SDavid Brownell * Context: can sleep 18468ae12a0dSDavid Brownell * 18478ae12a0dSDavid Brownell * SPI master controllers connect to their drivers using some non-SPI bus, 18488ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 18498ae12a0dSDavid Brownell * includes calling spi_register_master() to hook up to this SPI bus glue. 18508ae12a0dSDavid Brownell * 18518ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 18528ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 18538ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 18548ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 18558ae12a0dSDavid Brownell * chip is at which address. 18568ae12a0dSDavid Brownell * 18578ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 18588ae12a0dSDavid Brownell * success, else a negative error code (dropping the master's refcount). 18590c868461SDavid Brownell * After a successful return, the caller is responsible for calling 18600c868461SDavid Brownell * spi_unregister_master(). 186197d56dc6SJavier Martinez Canillas * 186297d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 18638ae12a0dSDavid Brownell */ 1864e9d5a461SAdrian Bunk int spi_register_master(struct spi_master *master) 18658ae12a0dSDavid Brownell { 1866e44a45aeSDavid Brownell static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 186749dce689STony Jones struct device *dev = master->dev.parent; 18682b9603a0SFeng Tang struct boardinfo *bi; 18698ae12a0dSDavid Brownell int status = -ENODEV; 18708ae12a0dSDavid Brownell int dynamic = 0; 18718ae12a0dSDavid Brownell 18720c868461SDavid Brownell if (!dev) 18730c868461SDavid Brownell return -ENODEV; 18740c868461SDavid Brownell 187574317984SJean-Christophe PLAGNIOL-VILLARD status = of_spi_register_master(master); 187674317984SJean-Christophe PLAGNIOL-VILLARD if (status) 187774317984SJean-Christophe PLAGNIOL-VILLARD return status; 187874317984SJean-Christophe PLAGNIOL-VILLARD 1879082c8cb4SDavid Brownell /* even if it's just one always-selected device, there must 1880082c8cb4SDavid Brownell * be at least one chipselect 1881082c8cb4SDavid Brownell */ 1882082c8cb4SDavid Brownell if (master->num_chipselect == 0) 1883082c8cb4SDavid Brownell return -EINVAL; 1884082c8cb4SDavid Brownell 1885bb29785eSGrant Likely if ((master->bus_num < 0) && master->dev.of_node) 1886bb29785eSGrant Likely master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1887bb29785eSGrant Likely 18888ae12a0dSDavid Brownell /* convention: dynamically assigned bus IDs count down from the max */ 1889a020ed75SDavid Brownell if (master->bus_num < 0) { 1890082c8cb4SDavid Brownell /* FIXME switch to an IDR based scheme, something like 1891082c8cb4SDavid Brownell * I2C now uses, so we can't run out of "dynamic" IDs 1892082c8cb4SDavid Brownell */ 18938ae12a0dSDavid Brownell master->bus_num = atomic_dec_return(&dyn_bus_id); 1894b885244eSDavid Brownell dynamic = 1; 18958ae12a0dSDavid Brownell } 
18968ae12a0dSDavid Brownell 18975424d43eSMark Brown INIT_LIST_HEAD(&master->queue); 18985424d43eSMark Brown spin_lock_init(&master->queue_lock); 1899cf32b71eSErnst Schwab spin_lock_init(&master->bus_lock_spinlock); 1900cf32b71eSErnst Schwab mutex_init(&master->bus_lock_mutex); 1901ef4d96ecSMark Brown mutex_init(&master->io_mutex); 1902cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1903b158935fSMark Brown init_completion(&master->xfer_completion); 19046ad45a27SMark Brown if (!master->max_dma_len) 19056ad45a27SMark Brown master->max_dma_len = INT_MAX; 1906cf32b71eSErnst Schwab 19078ae12a0dSDavid Brownell /* register the device, then userspace will see it. 19088ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 19098ae12a0dSDavid Brownell */ 191035f74fcaSKay Sievers dev_set_name(&master->dev, "spi%u", master->bus_num); 191149dce689STony Jones status = device_add(&master->dev); 1912b885244eSDavid Brownell if (status < 0) 19138ae12a0dSDavid Brownell goto done; 191435f74fcaSKay Sievers dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 19158ae12a0dSDavid Brownell dynamic ? " (dynamic)" : ""); 19168ae12a0dSDavid Brownell 1917ffbbdd21SLinus Walleij /* If we're using a queued driver, start the queue */ 1918ffbbdd21SLinus Walleij if (master->transfer) 1919ffbbdd21SLinus Walleij dev_info(dev, "master is unqueued, this is deprecated\n"); 1920ffbbdd21SLinus Walleij else { 1921ffbbdd21SLinus Walleij status = spi_master_initialize_queue(master); 1922ffbbdd21SLinus Walleij if (status) { 1923e93b0724SAxel Lin device_del(&master->dev); 1924ffbbdd21SLinus Walleij goto done; 1925ffbbdd21SLinus Walleij } 1926ffbbdd21SLinus Walleij } 1927eca2ebc7SMartin Sperl /* add statistics */ 1928eca2ebc7SMartin Sperl spin_lock_init(&master->statistics.lock); 1929ffbbdd21SLinus Walleij 19302b9603a0SFeng Tang mutex_lock(&board_lock); 19312b9603a0SFeng Tang list_add_tail(&master->list, &spi_master_list); 19322b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 19332b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 19342b9603a0SFeng Tang mutex_unlock(&board_lock); 19352b9603a0SFeng Tang 193664bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 193712b15e83SAnatolij Gustschin of_register_spi_devices(master); 193864bee4d2SMika Westerberg acpi_register_spi_devices(master); 19398ae12a0dSDavid Brownell done: 19408ae12a0dSDavid Brownell return status; 19418ae12a0dSDavid Brownell } 19428ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_master); 19438ae12a0dSDavid Brownell 1944666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 1945666d5b4cSMark Brown { 1946666d5b4cSMark Brown spi_unregister_master(*(struct spi_master **)res); 1947666d5b4cSMark Brown } 1948666d5b4cSMark Brown 1949666d5b4cSMark Brown /** 1950666d5b4cSMark Brown * dev_spi_register_master - register managed SPI master controller 1951666d5b4cSMark Brown * @dev: device managing SPI master 1952666d5b4cSMark Brown * @master: initialized master, originally from spi_alloc_master() 1953666d5b4cSMark Brown * Context: can sleep 1954666d5b4cSMark Brown * 1955666d5b4cSMark Brown * Register a SPI device as with spi_register_master() which will 1956666d5b4cSMark Brown * automatically be unregister 195797d56dc6SJavier Martinez Canillas * 195897d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
1959666d5b4cSMark Brown */ 1960666d5b4cSMark Brown int devm_spi_register_master(struct device *dev, struct spi_master *master) 1961666d5b4cSMark Brown { 1962666d5b4cSMark Brown struct spi_master **ptr; 1963666d5b4cSMark Brown int ret; 1964666d5b4cSMark Brown 1965666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1966666d5b4cSMark Brown if (!ptr) 1967666d5b4cSMark Brown return -ENOMEM; 1968666d5b4cSMark Brown 1969666d5b4cSMark Brown ret = spi_register_master(master); 19704b92894eSStephen Warren if (!ret) { 1971666d5b4cSMark Brown *ptr = master; 1972666d5b4cSMark Brown devres_add(dev, ptr); 1973666d5b4cSMark Brown } else { 1974666d5b4cSMark Brown devres_free(ptr); 1975666d5b4cSMark Brown } 1976666d5b4cSMark Brown 1977666d5b4cSMark Brown return ret; 1978666d5b4cSMark Brown } 1979666d5b4cSMark Brown EXPORT_SYMBOL_GPL(devm_spi_register_master); 1980666d5b4cSMark Brown 198134860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 19828ae12a0dSDavid Brownell { 19830c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 19848ae12a0dSDavid Brownell return 0; 19858ae12a0dSDavid Brownell } 19868ae12a0dSDavid Brownell 19878ae12a0dSDavid Brownell /** 19888ae12a0dSDavid Brownell * spi_unregister_master - unregister SPI master controller 19898ae12a0dSDavid Brownell * @master: the master being unregistered 199033e34dc6SDavid Brownell * Context: can sleep 19918ae12a0dSDavid Brownell * 19928ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 19938ae12a0dSDavid Brownell * only ones directly touching chip registers. 19948ae12a0dSDavid Brownell * 19958ae12a0dSDavid Brownell * This must be called from context that can sleep. 19968ae12a0dSDavid Brownell */ 19978ae12a0dSDavid Brownell void spi_unregister_master(struct spi_master *master) 19988ae12a0dSDavid Brownell { 199989fc9a1aSJeff Garzik int dummy; 200089fc9a1aSJeff Garzik 2001ffbbdd21SLinus Walleij if (master->queued) { 2002ffbbdd21SLinus Walleij if (spi_destroy_queue(master)) 2003ffbbdd21SLinus Walleij dev_err(&master->dev, "queue remove failed\n"); 2004ffbbdd21SLinus Walleij } 2005ffbbdd21SLinus Walleij 20062b9603a0SFeng Tang mutex_lock(&board_lock); 20072b9603a0SFeng Tang list_del(&master->list); 20082b9603a0SFeng Tang mutex_unlock(&board_lock); 20092b9603a0SFeng Tang 201097dbf37dSSebastian Andrzej Siewior dummy = device_for_each_child(&master->dev, NULL, __unregister); 201149dce689STony Jones device_unregister(&master->dev); 20128ae12a0dSDavid Brownell } 20138ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_unregister_master); 20148ae12a0dSDavid Brownell 2015ffbbdd21SLinus Walleij int spi_master_suspend(struct spi_master *master) 2016ffbbdd21SLinus Walleij { 2017ffbbdd21SLinus Walleij int ret; 2018ffbbdd21SLinus Walleij 2019ffbbdd21SLinus Walleij /* Basically no-ops for non-queued masters */ 2020ffbbdd21SLinus Walleij if (!master->queued) 2021ffbbdd21SLinus Walleij return 0; 2022ffbbdd21SLinus Walleij 2023ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 2024ffbbdd21SLinus Walleij if (ret) 2025ffbbdd21SLinus Walleij dev_err(&master->dev, "queue stop failed\n"); 2026ffbbdd21SLinus Walleij 2027ffbbdd21SLinus Walleij return ret; 2028ffbbdd21SLinus Walleij } 2029ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_suspend); 2030ffbbdd21SLinus Walleij 2031ffbbdd21SLinus Walleij int spi_master_resume(struct spi_master *master) 2032ffbbdd21SLinus Walleij { 2033ffbbdd21SLinus Walleij int ret; 2034ffbbdd21SLinus Walleij 2035ffbbdd21SLinus Walleij if 
(!master->queued) 2036ffbbdd21SLinus Walleij return 0; 2037ffbbdd21SLinus Walleij 2038ffbbdd21SLinus Walleij ret = spi_start_queue(master); 2039ffbbdd21SLinus Walleij if (ret) 2040ffbbdd21SLinus Walleij dev_err(&master->dev, "queue restart failed\n"); 2041ffbbdd21SLinus Walleij 2042ffbbdd21SLinus Walleij return ret; 2043ffbbdd21SLinus Walleij } 2044ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_resume); 2045ffbbdd21SLinus Walleij 20469f3b795aSMichał Mirosław static int __spi_master_match(struct device *dev, const void *data) 20475ed2c832SDave Young { 20485ed2c832SDave Young struct spi_master *m; 20499f3b795aSMichał Mirosław const u16 *bus_num = data; 20505ed2c832SDave Young 20515ed2c832SDave Young m = container_of(dev, struct spi_master, dev); 20525ed2c832SDave Young return m->bus_num == *bus_num; 20535ed2c832SDave Young } 20545ed2c832SDave Young 20558ae12a0dSDavid Brownell /** 20568ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 20578ae12a0dSDavid Brownell * @bus_num: the master's bus number 205833e34dc6SDavid Brownell * Context: can sleep 20598ae12a0dSDavid Brownell * 20608ae12a0dSDavid Brownell * This call may be used with devices that are registered after 20618ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 20628ae12a0dSDavid Brownell * spi_master (which the caller must release), or NULL if there is 20638ae12a0dSDavid Brownell * no such master registered. 206497d56dc6SJavier Martinez Canillas * 206597d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 20668ae12a0dSDavid Brownell */ 20678ae12a0dSDavid Brownell struct spi_master *spi_busnum_to_master(u16 bus_num) 20688ae12a0dSDavid Brownell { 206949dce689STony Jones struct device *dev; 20701e9a51dcSAtsushi Nemoto struct spi_master *master = NULL; 20718ae12a0dSDavid Brownell 2072695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 20735ed2c832SDave Young __spi_master_match); 20745ed2c832SDave Young if (dev) 20755ed2c832SDave Young master = container_of(dev, struct spi_master, dev); 20765ed2c832SDave Young /* reference got in class_find_device */ 20771e9a51dcSAtsushi Nemoto return master; 20788ae12a0dSDavid Brownell } 20798ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 20808ae12a0dSDavid Brownell 2081d780c371SMartin Sperl /*-------------------------------------------------------------------------*/ 2082d780c371SMartin Sperl 2083d780c371SMartin Sperl /* Core methods for SPI resource management */ 2084d780c371SMartin Sperl 2085d780c371SMartin Sperl /** 2086d780c371SMartin Sperl * spi_res_alloc - allocate a spi resource that is life-cycle managed 2087d780c371SMartin Sperl * during the processing of a spi_message while using 2088d780c371SMartin Sperl * spi_transfer_one 2089d780c371SMartin Sperl * @spi: the spi device for which we allocate memory 2090d780c371SMartin Sperl * @release: the release code to execute for this resource 2091d780c371SMartin Sperl * @size: size to alloc and return 2092d780c371SMartin Sperl * @gfp: GFP allocation flags 2093d780c371SMartin Sperl * 2094d780c371SMartin Sperl * Return: the pointer to the allocated data 2095d780c371SMartin Sperl * 2096d780c371SMartin Sperl * This may get enhanced in the future to allocate from a memory pool 2097d780c371SMartin Sperl * of the @spi_device or @spi_master to avoid repeated allocations. 
2098d780c371SMartin Sperl */ 2099d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi, 2100d780c371SMartin Sperl spi_res_release_t release, 2101d780c371SMartin Sperl size_t size, gfp_t gfp) 2102d780c371SMartin Sperl { 2103d780c371SMartin Sperl struct spi_res *sres; 2104d780c371SMartin Sperl 2105d780c371SMartin Sperl sres = kzalloc(sizeof(*sres) + size, gfp); 2106d780c371SMartin Sperl if (!sres) 2107d780c371SMartin Sperl return NULL; 2108d780c371SMartin Sperl 2109d780c371SMartin Sperl INIT_LIST_HEAD(&sres->entry); 2110d780c371SMartin Sperl sres->release = release; 2111d780c371SMartin Sperl 2112d780c371SMartin Sperl return sres->data; 2113d780c371SMartin Sperl } 2114d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc); 2115d780c371SMartin Sperl 2116d780c371SMartin Sperl /** 2117d780c371SMartin Sperl * spi_res_free - free an spi resource 2118d780c371SMartin Sperl * @res: pointer to the custom data of a resource 2119d780c371SMartin Sperl * 2120d780c371SMartin Sperl */ 2121d780c371SMartin Sperl void spi_res_free(void *res) 2122d780c371SMartin Sperl { 2123d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2124d780c371SMartin Sperl 2125d780c371SMartin Sperl if (!res) 2126d780c371SMartin Sperl return; 2127d780c371SMartin Sperl 2128d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2129d780c371SMartin Sperl kfree(sres); 2130d780c371SMartin Sperl } 2131d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free); 2132d780c371SMartin Sperl 2133d780c371SMartin Sperl /** 2134d780c371SMartin Sperl * spi_res_add - add a spi_res to the spi_message 2135d780c371SMartin Sperl * @message: the spi message 2136d780c371SMartin Sperl * @res: the spi_resource 2137d780c371SMartin Sperl */ 2138d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res) 2139d780c371SMartin Sperl { 2140d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2141d780c371SMartin Sperl 2142d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2143d780c371SMartin Sperl list_add_tail(&sres->entry, &message->resources); 2144d780c371SMartin Sperl } 2145d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add); 2146d780c371SMartin Sperl 2147d780c371SMartin Sperl /** 2148d780c371SMartin Sperl * spi_res_release - release all spi resources for this message 2149d780c371SMartin Sperl * @master: the @spi_master 2150d780c371SMartin Sperl * @message: the @spi_message 2151d780c371SMartin Sperl */ 2152d780c371SMartin Sperl void spi_res_release(struct spi_master *master, 2153d780c371SMartin Sperl struct spi_message *message) 2154d780c371SMartin Sperl { 2155d780c371SMartin Sperl struct spi_res *res; 2156d780c371SMartin Sperl 2157d780c371SMartin Sperl while (!list_empty(&message->resources)) { 2158d780c371SMartin Sperl res = list_last_entry(&message->resources, 2159d780c371SMartin Sperl struct spi_res, entry); 2160d780c371SMartin Sperl 2161d780c371SMartin Sperl if (res->release) 2162d780c371SMartin Sperl res->release(master, message, res->data); 2163d780c371SMartin Sperl 2164d780c371SMartin Sperl list_del(&res->entry); 2165d780c371SMartin Sperl 2166d780c371SMartin Sperl kfree(res); 2167d780c371SMartin Sperl } 2168d780c371SMartin Sperl } 2169d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release); 21708ae12a0dSDavid Brownell 21718ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 21728ae12a0dSDavid Brownell 2173523baf5aSMartin Sperl /* Core methods for spi_message alterations */ 2174523baf5aSMartin Sperl 
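/*
 * Illustrative sketch (not part of this file's API): how a caller might
 * use the spi_res_* helpers documented above to tie a scratch buffer to
 * the life cycle of a spi_message.  The helper names
 * example_scratch_release() and example_attach_scratch() are invented
 * for this sketch; only spi_res_alloc(), spi_res_add() and the
 * (master, message, data) release-callback convention visible in
 * spi_res_release() above are taken from the surrounding code.  Guarded
 * by "#if 0" so it reads as documentation only.
 */
#if 0
static void example_scratch_release(struct spi_master *master,
				    struct spi_message *msg,
				    void *data)
{
	/*
	 * Nothing extra to undo for a plain buffer: spi_res_release()
	 * kfree()s the containing spi_res after this callback returns.
	 */
}

static void *example_attach_scratch(struct spi_message *msg, size_t len,
				    gfp_t gfp)
{
	void *buf;

	/* allocate the data area together with its release callback */
	buf = spi_res_alloc(msg->spi, example_scratch_release, len, gfp);
	if (!buf)
		return NULL;

	/* queue it on the message so spi_res_release() cleans it up later */
	spi_res_add(msg, buf);

	return buf;
}
#endif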
2175523baf5aSMartin Sperl static void __spi_replace_transfers_release(struct spi_master *master, 2176523baf5aSMartin Sperl struct spi_message *msg, 2177523baf5aSMartin Sperl void *res) 2178523baf5aSMartin Sperl { 2179523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer = res; 2180523baf5aSMartin Sperl size_t i; 2181523baf5aSMartin Sperl 2182523baf5aSMartin Sperl /* call extra callback if requested */ 2183523baf5aSMartin Sperl if (rxfer->release) 2184523baf5aSMartin Sperl rxfer->release(master, msg, res); 2185523baf5aSMartin Sperl 2186523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2187523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2188523baf5aSMartin Sperl 2189523baf5aSMartin Sperl /* remove the formerly inserted entries */ 2190523baf5aSMartin Sperl for (i = 0; i < rxfer->inserted; i++) 2191523baf5aSMartin Sperl list_del(&rxfer->inserted_transfers[i].transfer_list); 2192523baf5aSMartin Sperl } 2193523baf5aSMartin Sperl 2194523baf5aSMartin Sperl /** 2195523baf5aSMartin Sperl * spi_replace_transfers - replace transfers with several transfers 2196523baf5aSMartin Sperl * and register change with spi_message.resources 2197523baf5aSMartin Sperl * @msg: the spi_message we work upon 2198523baf5aSMartin Sperl * @xfer_first: the first spi_transfer we want to replace 2199523baf5aSMartin Sperl * @remove: number of transfers to remove 2200523baf5aSMartin Sperl * @insert: the number of transfers we want to insert instead 2201523baf5aSMartin Sperl * @release: extra release code necessary in some circumstances 2202523baf5aSMartin Sperl * @extradatasize: extra data to allocate (with alignment guarantees 2203523baf5aSMartin Sperl * of struct @spi_transfer) 220405885397SMartin Sperl * @gfp: gfp flags 2205523baf5aSMartin Sperl * 2206523baf5aSMartin Sperl * Returns: pointer to @spi_replaced_transfers, 2207523baf5aSMartin Sperl * PTR_ERR(...) in case of errors. 
2208523baf5aSMartin Sperl */ 2209523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers( 2210523baf5aSMartin Sperl struct spi_message *msg, 2211523baf5aSMartin Sperl struct spi_transfer *xfer_first, 2212523baf5aSMartin Sperl size_t remove, 2213523baf5aSMartin Sperl size_t insert, 2214523baf5aSMartin Sperl spi_replaced_release_t release, 2215523baf5aSMartin Sperl size_t extradatasize, 2216523baf5aSMartin Sperl gfp_t gfp) 2217523baf5aSMartin Sperl { 2218523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer; 2219523baf5aSMartin Sperl struct spi_transfer *xfer; 2220523baf5aSMartin Sperl size_t i; 2221523baf5aSMartin Sperl 2222523baf5aSMartin Sperl /* allocate the structure using spi_res */ 2223523baf5aSMartin Sperl rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2224523baf5aSMartin Sperl insert * sizeof(struct spi_transfer) 2225523baf5aSMartin Sperl + sizeof(struct spi_replaced_transfers) 2226523baf5aSMartin Sperl + extradatasize, 2227523baf5aSMartin Sperl gfp); 2228523baf5aSMartin Sperl if (!rxfer) 2229523baf5aSMartin Sperl return ERR_PTR(-ENOMEM); 2230523baf5aSMartin Sperl 2231523baf5aSMartin Sperl /* the release code to invoke before running the generic release */ 2232523baf5aSMartin Sperl rxfer->release = release; 2233523baf5aSMartin Sperl 2234523baf5aSMartin Sperl /* assign extradata */ 2235523baf5aSMartin Sperl if (extradatasize) 2236523baf5aSMartin Sperl rxfer->extradata = 2237523baf5aSMartin Sperl &rxfer->inserted_transfers[insert]; 2238523baf5aSMartin Sperl 2239523baf5aSMartin Sperl /* init the replaced_transfers list */ 2240523baf5aSMartin Sperl INIT_LIST_HEAD(&rxfer->replaced_transfers); 2241523baf5aSMartin Sperl 2242523baf5aSMartin Sperl /* assign the list_entry after which we should reinsert 2243523baf5aSMartin Sperl * the @replaced_transfers - it may be spi_message.messages! 
2244523baf5aSMartin Sperl */ 2245523baf5aSMartin Sperl rxfer->replaced_after = xfer_first->transfer_list.prev; 2246523baf5aSMartin Sperl 2247523baf5aSMartin Sperl /* remove the requested number of transfers */ 2248523baf5aSMartin Sperl for (i = 0; i < remove; i++) { 2249523baf5aSMartin Sperl /* if the entry after replaced_after it is msg->transfers 2250523baf5aSMartin Sperl * then we have been requested to remove more transfers 2251523baf5aSMartin Sperl * than are in the list 2252523baf5aSMartin Sperl */ 2253523baf5aSMartin Sperl if (rxfer->replaced_after->next == &msg->transfers) { 2254523baf5aSMartin Sperl dev_err(&msg->spi->dev, 2255523baf5aSMartin Sperl "requested to remove more spi_transfers than are available\n"); 2256523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2257523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, 2258523baf5aSMartin Sperl rxfer->replaced_after); 2259523baf5aSMartin Sperl 2260523baf5aSMartin Sperl /* free the spi_replace_transfer structure */ 2261523baf5aSMartin Sperl spi_res_free(rxfer); 2262523baf5aSMartin Sperl 2263523baf5aSMartin Sperl /* and return with an error */ 2264523baf5aSMartin Sperl return ERR_PTR(-EINVAL); 2265523baf5aSMartin Sperl } 2266523baf5aSMartin Sperl 2267523baf5aSMartin Sperl /* remove the entry after replaced_after from list of 2268523baf5aSMartin Sperl * transfers and add it to list of replaced_transfers 2269523baf5aSMartin Sperl */ 2270523baf5aSMartin Sperl list_move_tail(rxfer->replaced_after->next, 2271523baf5aSMartin Sperl &rxfer->replaced_transfers); 2272523baf5aSMartin Sperl } 2273523baf5aSMartin Sperl 2274523baf5aSMartin Sperl /* create copy of the given xfer with identical settings 2275523baf5aSMartin Sperl * based on the first transfer to get removed 2276523baf5aSMartin Sperl */ 2277523baf5aSMartin Sperl for (i = 0; i < insert; i++) { 2278523baf5aSMartin Sperl /* we need to run in reverse order */ 2279523baf5aSMartin Sperl xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2280523baf5aSMartin Sperl 2281523baf5aSMartin Sperl /* copy all spi_transfer data */ 2282523baf5aSMartin Sperl memcpy(xfer, xfer_first, sizeof(*xfer)); 2283523baf5aSMartin Sperl 2284523baf5aSMartin Sperl /* add to list */ 2285523baf5aSMartin Sperl list_add(&xfer->transfer_list, rxfer->replaced_after); 2286523baf5aSMartin Sperl 2287523baf5aSMartin Sperl /* clear cs_change and delay_usecs for all but the last */ 2288523baf5aSMartin Sperl if (i) { 2289523baf5aSMartin Sperl xfer->cs_change = false; 2290523baf5aSMartin Sperl xfer->delay_usecs = 0; 2291523baf5aSMartin Sperl } 2292523baf5aSMartin Sperl } 2293523baf5aSMartin Sperl 2294523baf5aSMartin Sperl /* set up inserted */ 2295523baf5aSMartin Sperl rxfer->inserted = insert; 2296523baf5aSMartin Sperl 2297523baf5aSMartin Sperl /* and register it with spi_res/spi_message */ 2298523baf5aSMartin Sperl spi_res_add(msg, rxfer); 2299523baf5aSMartin Sperl 2300523baf5aSMartin Sperl return rxfer; 2301523baf5aSMartin Sperl } 2302523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers); 2303523baf5aSMartin Sperl 230408933418SFabio Estevam static int __spi_split_transfer_maxsize(struct spi_master *master, 2305d9f12122SMartin Sperl struct spi_message *msg, 2306d9f12122SMartin Sperl struct spi_transfer **xferp, 2307d9f12122SMartin Sperl size_t maxsize, 2308d9f12122SMartin Sperl gfp_t gfp) 2309d9f12122SMartin Sperl { 2310d9f12122SMartin Sperl struct spi_transfer *xfer = *xferp, *xfers; 2311d9f12122SMartin Sperl struct spi_replaced_transfers *srt; 2312d9f12122SMartin Sperl size_t 
offset;
2313d9f12122SMartin Sperl size_t count, i;
2314d9f12122SMartin Sperl 
2315d9f12122SMartin Sperl /* warn once about the fact that we are splitting a transfer */
2316d9f12122SMartin Sperl dev_warn_once(&msg->spi->dev,
23177d62f51eSFabio Estevam "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2318d9f12122SMartin Sperl xfer->len, maxsize);
2319d9f12122SMartin Sperl 
2320d9f12122SMartin Sperl /* calculate how many we have to replace */
2321d9f12122SMartin Sperl count = DIV_ROUND_UP(xfer->len, maxsize);
2322d9f12122SMartin Sperl 
2323d9f12122SMartin Sperl /* create replacement */
2324d9f12122SMartin Sperl srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2325657d32efSDan Carpenter if (IS_ERR(srt))
2326657d32efSDan Carpenter return PTR_ERR(srt);
2327d9f12122SMartin Sperl xfers = srt->inserted_transfers;
2328d9f12122SMartin Sperl 
2329d9f12122SMartin Sperl /* now handle each of those newly inserted spi_transfers
2330d9f12122SMartin Sperl * note that the replacement spi_transfers are all preset
2331d9f12122SMartin Sperl * to the same values as *xferp, so tx_buf, rx_buf and len
2332d9f12122SMartin Sperl * are all identical (as well as most others)
2333d9f12122SMartin Sperl * so we just have to fix up len and the pointers.
2334d9f12122SMartin Sperl *
2335d9f12122SMartin Sperl * this also includes support for the deprecated
2336d9f12122SMartin Sperl * spi_message.is_dma_mapped interface
2337d9f12122SMartin Sperl */
2338d9f12122SMartin Sperl 
2339d9f12122SMartin Sperl /* the first transfer just needs the length modified, so we
2340d9f12122SMartin Sperl * run it outside the loop
2341d9f12122SMartin Sperl */
2342c8dab77aSFabio Estevam xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2343d9f12122SMartin Sperl 
2344d9f12122SMartin Sperl /* all the others need rx_buf/tx_buf also set */
2345d9f12122SMartin Sperl for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2346d9f12122SMartin Sperl /* update rx_buf, tx_buf and dma */
2347d9f12122SMartin Sperl if (xfers[i].rx_buf)
2348d9f12122SMartin Sperl xfers[i].rx_buf += offset;
2349d9f12122SMartin Sperl if (xfers[i].rx_dma)
2350d9f12122SMartin Sperl xfers[i].rx_dma += offset;
2351d9f12122SMartin Sperl if (xfers[i].tx_buf)
2352d9f12122SMartin Sperl xfers[i].tx_buf += offset;
2353d9f12122SMartin Sperl if (xfers[i].tx_dma)
2354d9f12122SMartin Sperl xfers[i].tx_dma += offset;
2355d9f12122SMartin Sperl 
2356d9f12122SMartin Sperl /* update length */
2357d9f12122SMartin Sperl xfers[i].len = min(maxsize, xfers[i].len - offset);
2358d9f12122SMartin Sperl }
2359d9f12122SMartin Sperl 
2360d9f12122SMartin Sperl /* we set up xferp to the last entry we have inserted,
2361d9f12122SMartin Sperl * so that we skip those already split transfers
2362d9f12122SMartin Sperl */
2363d9f12122SMartin Sperl *xferp = &xfers[count - 1];
2364d9f12122SMartin Sperl 
2365d9f12122SMartin Sperl /* increment statistics counters */
2366d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2367d9f12122SMartin Sperl transfers_split_maxsize);
2368d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2369d9f12122SMartin Sperl transfers_split_maxsize);
2370d9f12122SMartin Sperl 
2371d9f12122SMartin Sperl return 0;
2372d9f12122SMartin Sperl }
2373d9f12122SMartin Sperl 
2374d9f12122SMartin Sperl /**
2375d9f12122SMartin Sperl * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2376d9f12122SMartin Sperl * when an individual transfer exceeds a
2377d9f12122SMartin Sperl * certain size
2378d9f12122SMartin Sperl * @master: the @spi_master for this transfer
23793700ce95SMasanari Iida * @msg: the @spi_message to transform
23803700ce95SMasanari Iida * @maxsize: the maximum length a transfer may have before it gets split
238110f11a22SJavier Martinez Canillas * @gfp: GFP allocation flags
2382d9f12122SMartin Sperl *
2383d9f12122SMartin Sperl * Return: status of transformation
2384d9f12122SMartin Sperl */
2385d9f12122SMartin Sperl int spi_split_transfers_maxsize(struct spi_master *master,
2386d9f12122SMartin Sperl struct spi_message *msg,
2387d9f12122SMartin Sperl size_t maxsize,
2388d9f12122SMartin Sperl gfp_t gfp)
2389d9f12122SMartin Sperl {
2390d9f12122SMartin Sperl struct spi_transfer *xfer;
2391d9f12122SMartin Sperl int ret;
2392d9f12122SMartin Sperl 
2393d9f12122SMartin Sperl /* iterate over the transfer_list,
2394d9f12122SMartin Sperl * but note that xfer is advanced to the last transfer inserted
2395d9f12122SMartin Sperl * to avoid checking sizes again unnecessarily (also xfer does
2396d9f12122SMartin Sperl * potentially belong to a different list by the time the
2397d9f12122SMartin Sperl * replacement has happened)
2398d9f12122SMartin Sperl */
2399d9f12122SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2400d9f12122SMartin Sperl if (xfer->len > maxsize) {
2401d9f12122SMartin Sperl ret = __spi_split_transfer_maxsize(
2402d9f12122SMartin Sperl master, msg, &xfer, maxsize, gfp);
2403d9f12122SMartin Sperl if (ret)
2404d9f12122SMartin Sperl return ret;
2405d9f12122SMartin Sperl }
2406d9f12122SMartin Sperl }
2407d9f12122SMartin Sperl 
2408d9f12122SMartin Sperl return 0;
2409d9f12122SMartin Sperl }
2410d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
24118ae12a0dSDavid Brownell 
24128ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
24138ae12a0dSDavid Brownell 
24147d077197SDavid Brownell /* Core methods for SPI master protocol drivers. Some of the
24157d077197SDavid Brownell * other core methods are currently defined as inline functions.
24167d077197SDavid Brownell */
24177d077197SDavid Brownell 
241863ab645fSStefan Brüns static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
241963ab645fSStefan Brüns {
242063ab645fSStefan Brüns if (master->bits_per_word_mask) {
242163ab645fSStefan Brüns /* Only 32 bits fit in the mask */
242263ab645fSStefan Brüns if (bits_per_word > 32)
242363ab645fSStefan Brüns return -EINVAL;
242463ab645fSStefan Brüns if (!(master->bits_per_word_mask &
242563ab645fSStefan Brüns SPI_BPW_MASK(bits_per_word)))
242663ab645fSStefan Brüns return -EINVAL;
242763ab645fSStefan Brüns }
242863ab645fSStefan Brüns 
242963ab645fSStefan Brüns return 0;
243063ab645fSStefan Brüns }
243163ab645fSStefan Brüns 
24327d077197SDavid Brownell /**
24337d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate
24347d077197SDavid Brownell * @spi: the device whose settings are being modified
24357d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device
24367d077197SDavid Brownell *
24377d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the
24387d077197SDavid Brownell * device doesn't work with its default. They may likewise need
24397d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function
24407d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep.
24417d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
24427d077197SDavid Brownell * effect the next time the device is selected and data is transferred to
24437d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected.
24447d077197SDavid Brownell *
24457d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option
24467d077197SDavid Brownell * that the underlying controller or its driver does not support. For
24477d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words,
24487d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects.
244997d56dc6SJavier Martinez Canillas *
245097d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code.
24517d077197SDavid Brownell */
24527d077197SDavid Brownell int spi_setup(struct spi_device *spi)
24537d077197SDavid Brownell {
245483596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits;
24555ab8d262SAndy Shevchenko int status;
24567d077197SDavid Brownell 
2457f477b7fbSwangyuhang /* check the mode to prevent DUAL and QUAD from being set at the same time
2458f477b7fbSwangyuhang */
2459f477b7fbSwangyuhang if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2460f477b7fbSwangyuhang ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2461f477b7fbSwangyuhang dev_err(&spi->dev,
2462f477b7fbSwangyuhang "setup: cannot select dual and quad at the same time\n");
2463f477b7fbSwangyuhang return -EINVAL;
2464f477b7fbSwangyuhang }
2465f477b7fbSwangyuhang /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
2466f477b7fbSwangyuhang */
2467f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode &
2468f477b7fbSwangyuhang (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2469f477b7fbSwangyuhang return -EINVAL;
2470e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options
2471e7db06b5SDavid Brownell * that aren't supported with their current master
2472e7db06b5SDavid Brownell */
2473e7db06b5SDavid Brownell bad_bits = spi->mode & ~spi->master->mode_bits;
247483596fbeSGeert Uytterhoeven ugly_bits = bad_bits &
247583596fbeSGeert Uytterhoeven (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
247683596fbeSGeert Uytterhoeven if (ugly_bits) {
247783596fbeSGeert Uytterhoeven dev_warn(&spi->dev,
247883596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n",
247983596fbeSGeert Uytterhoeven ugly_bits);
248083596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits;
248183596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits;
248283596fbeSGeert Uytterhoeven }
2483e7db06b5SDavid Brownell if (bad_bits) {
2484eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2485e7db06b5SDavid Brownell bad_bits);
2486e7db06b5SDavid Brownell return -EINVAL;
2487e7db06b5SDavid Brownell }
2488e7db06b5SDavid Brownell 
24897d077197SDavid Brownell if (!spi->bits_per_word)
24907d077197SDavid Brownell spi->bits_per_word = 8;
24917d077197SDavid Brownell 
24925ab8d262SAndy Shevchenko status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
24935ab8d262SAndy Shevchenko if (status)
24945ab8d262SAndy Shevchenko return status;
249563ab645fSStefan Brüns 
2496052eb2d4SAxel Lin if (!spi->max_speed_hz)
2497052eb2d4SAxel Lin spi->max_speed_hz = spi->master->max_speed_hz;
2498052eb2d4SAxel Lin 
2499caae070cSLaxman Dewangan if (spi->master->setup)
25007d077197SDavid Brownell status = spi->master->setup(spi);
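	/* whatever the controller's setup() reported, the core below forces
	 * the chip select to its non-active state, honouring any SPI_CS_HIGH
	 * polarity that was just configured
	 */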
25017d077197SDavid Brownell 2502abeedb01SFranklin S Cooper Jr spi_set_cs(spi, false); 2503abeedb01SFranklin S Cooper Jr 25045fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 25057d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 25067d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 25077d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 25087d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 25097d077197SDavid Brownell (spi->mode & SPI_LOOP) ? "loopback, " : "", 25107d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 25117d077197SDavid Brownell status); 25127d077197SDavid Brownell 25137d077197SDavid Brownell return status; 25147d077197SDavid Brownell } 25157d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 25167d077197SDavid Brownell 251790808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2518cf32b71eSErnst Schwab { 2519cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2520e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 25216ea31293SAtsushi Nemoto int w_size; 2522cf32b71eSErnst Schwab 252324a0013aSMark Brown if (list_empty(&message->transfers)) 252424a0013aSMark Brown return -EINVAL; 252524a0013aSMark Brown 2526cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 2527cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 2528cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 2529cf32b71eSErnst Schwab * software limitations. 2530cf32b71eSErnst Schwab */ 2531cf32b71eSErnst Schwab if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2532cf32b71eSErnst Schwab || (spi->mode & SPI_3WIRE)) { 2533cf32b71eSErnst Schwab unsigned flags = master->flags; 2534cf32b71eSErnst Schwab 2535cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 2536cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 2537cf32b71eSErnst Schwab return -EINVAL; 2538cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2539cf32b71eSErnst Schwab return -EINVAL; 2540cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2541cf32b71eSErnst Schwab return -EINVAL; 2542cf32b71eSErnst Schwab } 2543cf32b71eSErnst Schwab } 2544cf32b71eSErnst Schwab 2545e6811d1dSLaxman Dewangan /** 2546059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 2547059b8ffeSLaxman Dewangan * it is not set for this transfer. 2548f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 2549f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
2550e6811d1dSLaxman Dewangan */ 255177e80588SMartin Sperl message->frame_length = 0; 2552e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 2553078726ceSSourav Poddar message->frame_length += xfer->len; 2554e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 2555e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 2556a6f87fadSAxel Lin 2557a6f87fadSAxel Lin if (!xfer->speed_hz) 2558059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 25597dc9fbc3SMark Brown if (!xfer->speed_hz) 25607dc9fbc3SMark Brown xfer->speed_hz = master->max_speed_hz; 2561a6f87fadSAxel Lin 256256ede94aSGabor Juhos if (master->max_speed_hz && 256356ede94aSGabor Juhos xfer->speed_hz > master->max_speed_hz) 256456ede94aSGabor Juhos xfer->speed_hz = master->max_speed_hz; 256556ede94aSGabor Juhos 256663ab645fSStefan Brüns if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2567543bb255SStephen Warren return -EINVAL; 2568a2fd4f9fSMark Brown 25694d94bd21SIvan T. Ivanov /* 25704d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 25714d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 25724d94bd21SIvan T. Ivanov */ 25734d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 25744d94bd21SIvan T. Ivanov w_size = 1; 25754d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 25764d94bd21SIvan T. Ivanov w_size = 2; 25774d94bd21SIvan T. Ivanov else 25784d94bd21SIvan T. Ivanov w_size = 4; 25794d94bd21SIvan T. Ivanov 25804d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 25816ea31293SAtsushi Nemoto if (xfer->len % w_size) 25824d94bd21SIvan T. Ivanov return -EINVAL; 25834d94bd21SIvan T. Ivanov 2584a2fd4f9fSMark Brown if (xfer->speed_hz && master->min_speed_hz && 2585a2fd4f9fSMark Brown xfer->speed_hz < master->min_speed_hz) 2586a2fd4f9fSMark Brown return -EINVAL; 2587f477b7fbSwangyuhang 2588f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 2589f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 2590f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 2591f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 2592f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 25931afd9989SGeert Uytterhoeven * 1. check the value matches one of single, dual and quad 25941afd9989SGeert Uytterhoeven * 2. 
check tx/rx_nbits match the mode in spi_device 2595f477b7fbSwangyuhang */ 2596db90a441SSourav Poddar if (xfer->tx_buf) { 2597f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2598f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 2599f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 2600a2fd4f9fSMark Brown return -EINVAL; 2601f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2602f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2603f477b7fbSwangyuhang return -EINVAL; 2604f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2605f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 2606f477b7fbSwangyuhang return -EINVAL; 2607db90a441SSourav Poddar } 2608f477b7fbSwangyuhang /* check transfer rx_nbits */ 2609db90a441SSourav Poddar if (xfer->rx_buf) { 2610f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2611f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 2612f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 2613f477b7fbSwangyuhang return -EINVAL; 2614f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2615f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2616f477b7fbSwangyuhang return -EINVAL; 2617f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2618f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 2619f477b7fbSwangyuhang return -EINVAL; 2620e6811d1dSLaxman Dewangan } 2621e6811d1dSLaxman Dewangan } 2622e6811d1dSLaxman Dewangan 2623cf32b71eSErnst Schwab message->status = -EINPROGRESS; 262490808738SMark Brown 262590808738SMark Brown return 0; 262690808738SMark Brown } 262790808738SMark Brown 262890808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 262990808738SMark Brown { 263090808738SMark Brown struct spi_master *master = spi->master; 263190808738SMark Brown 263290808738SMark Brown message->spi = spi; 263390808738SMark Brown 2634eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2635eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2636eca2ebc7SMartin Sperl 263790808738SMark Brown trace_spi_message_submit(message); 263890808738SMark Brown 2639cf32b71eSErnst Schwab return master->transfer(spi, message); 2640cf32b71eSErnst Schwab } 2641cf32b71eSErnst Schwab 2642568d0697SDavid Brownell /** 2643568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 2644568d0697SDavid Brownell * @spi: device with which data will be exchanged 2645568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 2646568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 2647568d0697SDavid Brownell * 2648568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 2649568d0697SDavid Brownell * as well as from task contexts which can sleep. 2650568d0697SDavid Brownell * 2651568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 2652568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 2653568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 2654568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 2655568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 2656568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 2657568d0697SDavid Brownell * core or controller driver code. 
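 *
 * A minimal submission sketch; cmd, my_complete() and ctx are
 * illustrative names, not part of this API. The buffer must be
 * DMA-safe and, like the message itself, must stay untouched until
 * my_complete() (which may not sleep) has been invoked:
 *
 *	struct spi_transfer t = { .tx_buf = cmd, .len = sizeof(cmd) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	m.complete = my_complete;
 *	m.context = ctx;
 *	status = spi_async(spi, &m);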
2658568d0697SDavid Brownell * 2659568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 2660568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 2661568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 2662568d0697SDavid Brownell * time requirements, for example. 2663568d0697SDavid Brownell * 2664568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 2665568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 2666568d0697SDavid Brownell * Until returning from the associated message completion callback, 2667568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 2668568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 2669568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 267097d56dc6SJavier Martinez Canillas * 267197d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2672568d0697SDavid Brownell */ 2673568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 2674568d0697SDavid Brownell { 2675568d0697SDavid Brownell struct spi_master *master = spi->master; 2676cf32b71eSErnst Schwab int ret; 2677cf32b71eSErnst Schwab unsigned long flags; 2678568d0697SDavid Brownell 267990808738SMark Brown ret = __spi_validate(spi, message); 268090808738SMark Brown if (ret != 0) 268190808738SMark Brown return ret; 268290808738SMark Brown 2683cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2684568d0697SDavid Brownell 2685cf32b71eSErnst Schwab if (master->bus_lock_flag) 2686cf32b71eSErnst Schwab ret = -EBUSY; 2687cf32b71eSErnst Schwab else 2688cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2689568d0697SDavid Brownell 2690cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2691cf32b71eSErnst Schwab 2692cf32b71eSErnst Schwab return ret; 2693568d0697SDavid Brownell } 2694568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 2695568d0697SDavid Brownell 2696cf32b71eSErnst Schwab /** 2697cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 2698cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2699cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 2700cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 2701cf32b71eSErnst Schwab * 2702cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 2703cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 2704cf32b71eSErnst Schwab * 2705cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 2706cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 2707cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 2708cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 2709cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 2710cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 2711cf32b71eSErnst Schwab * core or controller driver code. 
2712cf32b71eSErnst Schwab * 2713cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 2714cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 2715cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 2716cf32b71eSErnst Schwab * time requirements, for example. 2717cf32b71eSErnst Schwab * 2718cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 2719cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 2720cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 2721cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 2722cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 2723cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 272497d56dc6SJavier Martinez Canillas * 272597d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2726cf32b71eSErnst Schwab */ 2727cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2728cf32b71eSErnst Schwab { 2729cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2730cf32b71eSErnst Schwab int ret; 2731cf32b71eSErnst Schwab unsigned long flags; 2732cf32b71eSErnst Schwab 273390808738SMark Brown ret = __spi_validate(spi, message); 273490808738SMark Brown if (ret != 0) 273590808738SMark Brown return ret; 273690808738SMark Brown 2737cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2738cf32b71eSErnst Schwab 2739cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2740cf32b71eSErnst Schwab 2741cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2742cf32b71eSErnst Schwab 2743cf32b71eSErnst Schwab return ret; 2744cf32b71eSErnst Schwab 2745cf32b71eSErnst Schwab } 2746cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 2747cf32b71eSErnst Schwab 27487d077197SDavid Brownell 2749556351f1SVignesh R int spi_flash_read(struct spi_device *spi, 2750556351f1SVignesh R struct spi_flash_read_message *msg) 2751556351f1SVignesh R 2752556351f1SVignesh R { 2753556351f1SVignesh R struct spi_master *master = spi->master; 2754f4502dd1SVignesh R struct device *rx_dev = NULL; 2755556351f1SVignesh R int ret; 2756556351f1SVignesh R 2757556351f1SVignesh R if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2758556351f1SVignesh R msg->addr_nbits == SPI_NBITS_DUAL) && 2759556351f1SVignesh R !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2760556351f1SVignesh R return -EINVAL; 2761556351f1SVignesh R if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2762556351f1SVignesh R msg->addr_nbits == SPI_NBITS_QUAD) && 2763556351f1SVignesh R !(spi->mode & SPI_TX_QUAD)) 2764556351f1SVignesh R return -EINVAL; 2765556351f1SVignesh R if (msg->data_nbits == SPI_NBITS_DUAL && 2766556351f1SVignesh R !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2767556351f1SVignesh R return -EINVAL; 2768556351f1SVignesh R if (msg->data_nbits == SPI_NBITS_QUAD && 2769556351f1SVignesh R !(spi->mode & SPI_RX_QUAD)) 2770556351f1SVignesh R return -EINVAL; 2771556351f1SVignesh R 2772556351f1SVignesh R if (master->auto_runtime_pm) { 2773556351f1SVignesh R ret = pm_runtime_get_sync(master->dev.parent); 2774556351f1SVignesh R if (ret < 0) { 2775556351f1SVignesh R dev_err(&master->dev, "Failed to power device: %d\n", 2776556351f1SVignesh R ret); 2777556351f1SVignesh R return ret; 2778556351f1SVignesh R } 
2779556351f1SVignesh R } 2780f4502dd1SVignesh R 2781556351f1SVignesh R mutex_lock(&master->bus_lock_mutex); 2782ef4d96ecSMark Brown mutex_lock(&master->io_mutex); 2783f4502dd1SVignesh R if (master->dma_rx) { 2784f4502dd1SVignesh R rx_dev = master->dma_rx->device->dev; 2785f4502dd1SVignesh R ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2786f4502dd1SVignesh R msg->buf, msg->len, 2787f4502dd1SVignesh R DMA_FROM_DEVICE); 2788f4502dd1SVignesh R if (!ret) 2789f4502dd1SVignesh R msg->cur_msg_mapped = true; 2790f4502dd1SVignesh R } 2791556351f1SVignesh R ret = master->spi_flash_read(spi, msg); 2792f4502dd1SVignesh R if (msg->cur_msg_mapped) 2793f4502dd1SVignesh R spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2794f4502dd1SVignesh R DMA_FROM_DEVICE); 2795ef4d96ecSMark Brown mutex_unlock(&master->io_mutex); 2796556351f1SVignesh R mutex_unlock(&master->bus_lock_mutex); 2797f4502dd1SVignesh R 2798556351f1SVignesh R if (master->auto_runtime_pm) 2799556351f1SVignesh R pm_runtime_put(master->dev.parent); 2800556351f1SVignesh R 2801556351f1SVignesh R return ret; 2802556351f1SVignesh R } 2803556351f1SVignesh R EXPORT_SYMBOL_GPL(spi_flash_read); 2804556351f1SVignesh R 28057d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 28067d077197SDavid Brownell 28077d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on 28087d077197SDavid Brownell * top of the core. Some other utility methods are defined as 28097d077197SDavid Brownell * inline functions. 28107d077197SDavid Brownell */ 28117d077197SDavid Brownell 28125d870c8eSAndrew Morton static void spi_complete(void *arg) 28135d870c8eSAndrew Morton { 28145d870c8eSAndrew Morton complete(arg); 28155d870c8eSAndrew Morton } 28165d870c8eSAndrew Morton 2817ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2818cf32b71eSErnst Schwab { 2819cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 2820cf32b71eSErnst Schwab int status; 2821cf32b71eSErnst Schwab struct spi_master *master = spi->master; 28220461a414SMark Brown unsigned long flags; 28230461a414SMark Brown 28240461a414SMark Brown status = __spi_validate(spi, message); 28250461a414SMark Brown if (status != 0) 28260461a414SMark Brown return status; 2827cf32b71eSErnst Schwab 2828cf32b71eSErnst Schwab message->complete = spi_complete; 2829cf32b71eSErnst Schwab message->context = &done; 28300461a414SMark Brown message->spi = spi; 2831cf32b71eSErnst Schwab 2832eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2833eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2834eca2ebc7SMartin Sperl 28350461a414SMark Brown /* If we're not using the legacy transfer method then we will 28360461a414SMark Brown * try to transfer in the calling context so special case. 28370461a414SMark Brown * This code would be less tricky if we could remove the 28380461a414SMark Brown * support for driver implemented message queues. 
28390461a414SMark Brown */ 28400461a414SMark Brown if (master->transfer == spi_queued_transfer) { 28410461a414SMark Brown spin_lock_irqsave(&master->bus_lock_spinlock, flags); 28420461a414SMark Brown 28430461a414SMark Brown trace_spi_message_submit(message); 28440461a414SMark Brown 28450461a414SMark Brown status = __spi_queued_transfer(spi, message, false); 28460461a414SMark Brown 28470461a414SMark Brown spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 28480461a414SMark Brown } else { 2849cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 28500461a414SMark Brown } 2851cf32b71eSErnst Schwab 2852cf32b71eSErnst Schwab if (status == 0) { 28530461a414SMark Brown /* Push out the messages in the calling context if we 28540461a414SMark Brown * can. 28550461a414SMark Brown */ 2856eca2ebc7SMartin Sperl if (master->transfer == spi_queued_transfer) { 2857eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2858eca2ebc7SMartin Sperl spi_sync_immediate); 2859eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2860eca2ebc7SMartin Sperl spi_sync_immediate); 2861ef4d96ecSMark Brown __spi_pump_messages(master, false); 2862eca2ebc7SMartin Sperl } 28630461a414SMark Brown 2864cf32b71eSErnst Schwab wait_for_completion(&done); 2865cf32b71eSErnst Schwab status = message->status; 2866cf32b71eSErnst Schwab } 2867cf32b71eSErnst Schwab message->context = NULL; 2868cf32b71eSErnst Schwab return status; 2869cf32b71eSErnst Schwab } 2870cf32b71eSErnst Schwab 28718ae12a0dSDavid Brownell /** 28728ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 28738ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 28748ae12a0dSDavid Brownell * @message: describes the data transfers 287533e34dc6SDavid Brownell * Context: can sleep 28768ae12a0dSDavid Brownell * 28778ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 28788ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. Low-overhead controller 28798ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 28808ae12a0dSDavid Brownell * 28818ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 28828ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 28838ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 28848ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 28858ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 28868ae12a0dSDavid Brownell * 28870c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 28880c868461SDavid Brownell * message will not be freed before this call returns. 28890c868461SDavid Brownell * 289097d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
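 *
 * A minimal blocking-write sketch; cmd is an illustrative, DMA-safe
 * (e.g. kmalloc'd) buffer holding the bytes to send:
 *
 *	struct spi_transfer t = { .tx_buf = cmd, .len = sizeof(cmd) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);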
28918ae12a0dSDavid Brownell */ 28928ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 28938ae12a0dSDavid Brownell { 2894ef4d96ecSMark Brown int ret; 2895ef4d96ecSMark Brown 2896ef4d96ecSMark Brown mutex_lock(&spi->master->bus_lock_mutex); 2897ef4d96ecSMark Brown ret = __spi_sync(spi, message); 2898ef4d96ecSMark Brown mutex_unlock(&spi->master->bus_lock_mutex); 2899ef4d96ecSMark Brown 2900ef4d96ecSMark Brown return ret; 29018ae12a0dSDavid Brownell } 29028ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 29038ae12a0dSDavid Brownell 2904cf32b71eSErnst Schwab /** 2905cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 2906cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2907cf32b71eSErnst Schwab * @message: describes the data transfers 2908cf32b71eSErnst Schwab * Context: can sleep 2909cf32b71eSErnst Schwab * 2910cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2911cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 2912cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 2913cf32b71eSErnst Schwab * 2914cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 291525985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2916cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 2917cf32b71eSErnst Schwab * 291897d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2919cf32b71eSErnst Schwab */ 2920cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2921cf32b71eSErnst Schwab { 2922ef4d96ecSMark Brown return __spi_sync(spi, message); 2923cf32b71eSErnst Schwab } 2924cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 2925cf32b71eSErnst Schwab 2926cf32b71eSErnst Schwab /** 2927cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2928cf32b71eSErnst Schwab * @master: SPI bus master that should be locked for exclusive bus access 2929cf32b71eSErnst Schwab * Context: can sleep 2930cf32b71eSErnst Schwab * 2931cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2932cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2933cf32b71eSErnst Schwab * 2934cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 2935cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2936cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 2937cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 2938cf32b71eSErnst Schwab * 293997d56dc6SJavier Martinez Canillas * Return: always zero. 
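 *
 * A sketch of the intended calling pattern, with error handling omitted
 * and msg1/msg2 standing in for messages prepared by the caller:
 *
 *	spi_bus_lock(master);
 *	spi_sync_locked(spi, &msg1);
 *	spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(master);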
2940cf32b71eSErnst Schwab */ 2941cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master) 2942cf32b71eSErnst Schwab { 2943cf32b71eSErnst Schwab unsigned long flags; 2944cf32b71eSErnst Schwab 2945cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 2946cf32b71eSErnst Schwab 2947cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2948cf32b71eSErnst Schwab master->bus_lock_flag = 1; 2949cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2950cf32b71eSErnst Schwab 2951cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 2952cf32b71eSErnst Schwab 2953cf32b71eSErnst Schwab return 0; 2954cf32b71eSErnst Schwab } 2955cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 2956cf32b71eSErnst Schwab 2957cf32b71eSErnst Schwab /** 2958cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 2959cf32b71eSErnst Schwab * @master: SPI bus master that was locked for exclusive bus access 2960cf32b71eSErnst Schwab * Context: can sleep 2961cf32b71eSErnst Schwab * 2962cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2963cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2964cf32b71eSErnst Schwab * 2965cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2966cf32b71eSErnst Schwab * call. 2967cf32b71eSErnst Schwab * 296897d56dc6SJavier Martinez Canillas * Return: always zero. 2969cf32b71eSErnst Schwab */ 2970cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 2971cf32b71eSErnst Schwab { 2972cf32b71eSErnst Schwab master->bus_lock_flag = 0; 2973cf32b71eSErnst Schwab 2974cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 2975cf32b71eSErnst Schwab 2976cf32b71eSErnst Schwab return 0; 2977cf32b71eSErnst Schwab } 2978cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 2979cf32b71eSErnst Schwab 2980a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 2981a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 29828ae12a0dSDavid Brownell 29838ae12a0dSDavid Brownell static u8 *buf; 29848ae12a0dSDavid Brownell 29858ae12a0dSDavid Brownell /** 29868ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 29878ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 29888ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 29898ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 299027570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 299127570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 299233e34dc6SDavid Brownell * Context: can sleep 29938ae12a0dSDavid Brownell * 29948ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 29958ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 29968ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 2997b885244eSDavid Brownell * This call may only be used from a context that may sleep. 29988ae12a0dSDavid Brownell * 29990c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 300033e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 300133e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 30020c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 
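 *
 * For instance, sending a one-byte command and reading a two-byte reply
 * (the 0x9f command value and the buffer names are purely illustrative):
 *
 *	u8 cmd = 0x9f;
 *	u8 id[2];
 *
 *	status = spi_write_then_read(spi, &cmd, 1, id, 2);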
300397d56dc6SJavier Martinez Canillas * 300497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 30058ae12a0dSDavid Brownell */ 30068ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 30070c4a1590SMark Brown const void *txbuf, unsigned n_tx, 30080c4a1590SMark Brown void *rxbuf, unsigned n_rx) 30098ae12a0dSDavid Brownell { 3010068f4070SDavid Brownell static DEFINE_MUTEX(lock); 30118ae12a0dSDavid Brownell 30128ae12a0dSDavid Brownell int status; 30138ae12a0dSDavid Brownell struct spi_message message; 3014bdff549eSDavid Brownell struct spi_transfer x[2]; 30158ae12a0dSDavid Brownell u8 *local_buf; 30168ae12a0dSDavid Brownell 3017b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 3018b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 3019b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 3020b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 30218ae12a0dSDavid Brownell */ 3022b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 30232cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 30242cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 3025b3a223eeSMark Brown if (!local_buf) 3026b3a223eeSMark Brown return -ENOMEM; 3027b3a223eeSMark Brown } else { 3028b3a223eeSMark Brown local_buf = buf; 3029b3a223eeSMark Brown } 30308ae12a0dSDavid Brownell 30318275c642SVitaly Wool spi_message_init(&message); 30325fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 3033bdff549eSDavid Brownell if (n_tx) { 3034bdff549eSDavid Brownell x[0].len = n_tx; 3035bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 3036bdff549eSDavid Brownell } 3037bdff549eSDavid Brownell if (n_rx) { 3038bdff549eSDavid Brownell x[1].len = n_rx; 3039bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 3040bdff549eSDavid Brownell } 30418275c642SVitaly Wool 30428ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 3043bdff549eSDavid Brownell x[0].tx_buf = local_buf; 3044bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 30458ae12a0dSDavid Brownell 30468ae12a0dSDavid Brownell /* do the i/o */ 30478ae12a0dSDavid Brownell status = spi_sync(spi, &message); 30489b938b74SMarc Pignat if (status == 0) 3049bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 30508ae12a0dSDavid Brownell 3051bdff549eSDavid Brownell if (x[0].tx_buf == buf) 3052068f4070SDavid Brownell mutex_unlock(&lock); 30538ae12a0dSDavid Brownell else 30548ae12a0dSDavid Brownell kfree(local_buf); 30558ae12a0dSDavid Brownell 30568ae12a0dSDavid Brownell return status; 30578ae12a0dSDavid Brownell } 30588ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 30598ae12a0dSDavid Brownell 30608ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 30618ae12a0dSDavid Brownell 3062ce79d54aSPantelis Antoniou #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3063ce79d54aSPantelis Antoniou static int __spi_of_device_match(struct device *dev, void *data) 3064ce79d54aSPantelis Antoniou { 3065ce79d54aSPantelis Antoniou return dev->of_node == data; 3066ce79d54aSPantelis Antoniou } 3067ce79d54aSPantelis Antoniou 3068ce79d54aSPantelis Antoniou /* must call put_device() when done with returned spi_device device */ 3069ce79d54aSPantelis Antoniou static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3070ce79d54aSPantelis Antoniou { 3071ce79d54aSPantelis Antoniou struct device *dev = 
bus_find_device(&spi_bus_type, NULL, node, 3072ce79d54aSPantelis Antoniou __spi_of_device_match); 3073ce79d54aSPantelis Antoniou return dev ? to_spi_device(dev) : NULL; 3074ce79d54aSPantelis Antoniou } 3075ce79d54aSPantelis Antoniou 3076ce79d54aSPantelis Antoniou static int __spi_of_master_match(struct device *dev, const void *data) 3077ce79d54aSPantelis Antoniou { 3078ce79d54aSPantelis Antoniou return dev->of_node == data; 3079ce79d54aSPantelis Antoniou } 3080ce79d54aSPantelis Antoniou 3081ce79d54aSPantelis Antoniou /* the spi masters are not using spi_bus, so we find it with another way */ 3082ce79d54aSPantelis Antoniou static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3083ce79d54aSPantelis Antoniou { 3084ce79d54aSPantelis Antoniou struct device *dev; 3085ce79d54aSPantelis Antoniou 3086ce79d54aSPantelis Antoniou dev = class_find_device(&spi_master_class, NULL, node, 3087ce79d54aSPantelis Antoniou __spi_of_master_match); 3088ce79d54aSPantelis Antoniou if (!dev) 3089ce79d54aSPantelis Antoniou return NULL; 3090ce79d54aSPantelis Antoniou 3091ce79d54aSPantelis Antoniou /* reference got in class_find_device */ 3092ce79d54aSPantelis Antoniou return container_of(dev, struct spi_master, dev); 3093ce79d54aSPantelis Antoniou } 3094ce79d54aSPantelis Antoniou 3095ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3096ce79d54aSPantelis Antoniou void *arg) 3097ce79d54aSPantelis Antoniou { 3098ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 3099ce79d54aSPantelis Antoniou struct spi_master *master; 3100ce79d54aSPantelis Antoniou struct spi_device *spi; 3101ce79d54aSPantelis Antoniou 3102ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 3103ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 3104ce79d54aSPantelis Antoniou master = of_find_spi_master_by_node(rd->dn->parent); 3105ce79d54aSPantelis Antoniou if (master == NULL) 3106ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 3107ce79d54aSPantelis Antoniou 3108bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3109bd6c1644SGeert Uytterhoeven put_device(&master->dev); 3110bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3111bd6c1644SGeert Uytterhoeven } 3112bd6c1644SGeert Uytterhoeven 3113ce79d54aSPantelis Antoniou spi = of_register_spi_device(master, rd->dn); 3114ce79d54aSPantelis Antoniou put_device(&master->dev); 3115ce79d54aSPantelis Antoniou 3116ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 3117ce79d54aSPantelis Antoniou pr_err("%s: failed to create for '%s'\n", 3118ce79d54aSPantelis Antoniou __func__, rd->dn->full_name); 3119ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 3120ce79d54aSPantelis Antoniou } 3121ce79d54aSPantelis Antoniou break; 3122ce79d54aSPantelis Antoniou 3123ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 3124bd6c1644SGeert Uytterhoeven /* already depopulated? */ 3125bd6c1644SGeert Uytterhoeven if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3126bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3127bd6c1644SGeert Uytterhoeven 3128ce79d54aSPantelis Antoniou /* find our device by node */ 3129ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 3130ce79d54aSPantelis Antoniou if (spi == NULL) 3131ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? 
not meant for us */ 3132ce79d54aSPantelis Antoniou 3133ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 3134ce79d54aSPantelis Antoniou spi_unregister_device(spi); 3135ce79d54aSPantelis Antoniou 3136ce79d54aSPantelis Antoniou /* and put the reference of the find */ 3137ce79d54aSPantelis Antoniou put_device(&spi->dev); 3138ce79d54aSPantelis Antoniou break; 3139ce79d54aSPantelis Antoniou } 3140ce79d54aSPantelis Antoniou 3141ce79d54aSPantelis Antoniou return NOTIFY_OK; 3142ce79d54aSPantelis Antoniou } 3143ce79d54aSPantelis Antoniou 3144ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 3145ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 3146ce79d54aSPantelis Antoniou }; 3147ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3148ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 3149ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3150ce79d54aSPantelis Antoniou 31517f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI) 31527f24467fSOctavian Purdila static int spi_acpi_master_match(struct device *dev, const void *data) 31537f24467fSOctavian Purdila { 31547f24467fSOctavian Purdila return ACPI_COMPANION(dev->parent) == data; 31557f24467fSOctavian Purdila } 31567f24467fSOctavian Purdila 31577f24467fSOctavian Purdila static int spi_acpi_device_match(struct device *dev, void *data) 31587f24467fSOctavian Purdila { 31597f24467fSOctavian Purdila return ACPI_COMPANION(dev) == data; 31607f24467fSOctavian Purdila } 31617f24467fSOctavian Purdila 31627f24467fSOctavian Purdila static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 31637f24467fSOctavian Purdila { 31647f24467fSOctavian Purdila struct device *dev; 31657f24467fSOctavian Purdila 31667f24467fSOctavian Purdila dev = class_find_device(&spi_master_class, NULL, adev, 31677f24467fSOctavian Purdila spi_acpi_master_match); 31687f24467fSOctavian Purdila if (!dev) 31697f24467fSOctavian Purdila return NULL; 31707f24467fSOctavian Purdila 31717f24467fSOctavian Purdila return container_of(dev, struct spi_master, dev); 31727f24467fSOctavian Purdila } 31737f24467fSOctavian Purdila 31747f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 31757f24467fSOctavian Purdila { 31767f24467fSOctavian Purdila struct device *dev; 31777f24467fSOctavian Purdila 31787f24467fSOctavian Purdila dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 31797f24467fSOctavian Purdila 31807f24467fSOctavian Purdila return dev ? 
to_spi_device(dev) : NULL; 31817f24467fSOctavian Purdila } 31827f24467fSOctavian Purdila 31837f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 31847f24467fSOctavian Purdila void *arg) 31857f24467fSOctavian Purdila { 31867f24467fSOctavian Purdila struct acpi_device *adev = arg; 31877f24467fSOctavian Purdila struct spi_master *master; 31887f24467fSOctavian Purdila struct spi_device *spi; 31897f24467fSOctavian Purdila 31907f24467fSOctavian Purdila switch (value) { 31917f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_ADD: 31927f24467fSOctavian Purdila master = acpi_spi_find_master_by_adev(adev->parent); 31937f24467fSOctavian Purdila if (!master) 31947f24467fSOctavian Purdila break; 31957f24467fSOctavian Purdila 31967f24467fSOctavian Purdila acpi_register_spi_device(master, adev); 31977f24467fSOctavian Purdila put_device(&master->dev); 31987f24467fSOctavian Purdila break; 31997f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_REMOVE: 32007f24467fSOctavian Purdila if (!acpi_device_enumerated(adev)) 32017f24467fSOctavian Purdila break; 32027f24467fSOctavian Purdila 32037f24467fSOctavian Purdila spi = acpi_spi_find_device_by_adev(adev); 32047f24467fSOctavian Purdila if (!spi) 32057f24467fSOctavian Purdila break; 32067f24467fSOctavian Purdila 32077f24467fSOctavian Purdila spi_unregister_device(spi); 32087f24467fSOctavian Purdila put_device(&spi->dev); 32097f24467fSOctavian Purdila break; 32107f24467fSOctavian Purdila } 32117f24467fSOctavian Purdila 32127f24467fSOctavian Purdila return NOTIFY_OK; 32137f24467fSOctavian Purdila } 32147f24467fSOctavian Purdila 32157f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = { 32167f24467fSOctavian Purdila .notifier_call = acpi_spi_notify, 32177f24467fSOctavian Purdila }; 32187f24467fSOctavian Purdila #else 32197f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier; 32207f24467fSOctavian Purdila #endif 32217f24467fSOctavian Purdila 32228ae12a0dSDavid Brownell static int __init spi_init(void) 32238ae12a0dSDavid Brownell { 3224b885244eSDavid Brownell int status; 32258ae12a0dSDavid Brownell 3226e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3227b885244eSDavid Brownell if (!buf) { 3228b885244eSDavid Brownell status = -ENOMEM; 3229b885244eSDavid Brownell goto err0; 32308ae12a0dSDavid Brownell } 3231b885244eSDavid Brownell 3232b885244eSDavid Brownell status = bus_register(&spi_bus_type); 3233b885244eSDavid Brownell if (status < 0) 3234b885244eSDavid Brownell goto err1; 3235b885244eSDavid Brownell 3236b885244eSDavid Brownell status = class_register(&spi_master_class); 3237b885244eSDavid Brownell if (status < 0) 3238b885244eSDavid Brownell goto err2; 3239ce79d54aSPantelis Antoniou 32405267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3241ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 32427f24467fSOctavian Purdila if (IS_ENABLED(CONFIG_ACPI)) 32437f24467fSOctavian Purdila WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3244ce79d54aSPantelis Antoniou 3245b885244eSDavid Brownell return 0; 3246b885244eSDavid Brownell 3247b885244eSDavid Brownell err2: 3248b885244eSDavid Brownell bus_unregister(&spi_bus_type); 3249b885244eSDavid Brownell err1: 3250b885244eSDavid Brownell kfree(buf); 3251b885244eSDavid Brownell buf = NULL; 3252b885244eSDavid Brownell err0: 3253b885244eSDavid Brownell return status; 3254b885244eSDavid Brownell } 3255b885244eSDavid Brownell 32568ae12a0dSDavid Brownell /* board_info is 
normally registered in arch_initcall(), 32578ae12a0dSDavid Brownell * but even essential drivers wait till later 3258b885244eSDavid Brownell * 3259b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 3260b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 3261b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 32628ae12a0dSDavid Brownell */ 3263673c0c00SDavid Brownell postcore_initcall(spi_init); 32648ae12a0dSDavid Brownell 3265