/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
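
/*
 * Illustrative sketch (not part of this file): a minimal SPI protocol driver
 * registering itself against this core.  The "foo" names below are
 * hypothetical placeholders, not real drivers.
 *
 *	static int foo_spi_probe(struct spi_device *spi)
 *	{
 *		return 0;	(set up and start using the device here)
 *	}
 *
 *	static int foo_spi_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_spi_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.probe  = foo_spi_probe,
 *		.remove = foo_spi_remove,
 *	};
 *	module_spi_driver(foo_spi_driver);
 *
 * module_spi_driver() expands to spi_register_driver()/spi_unregister_driver()
 * calls, and spi_register_driver() in turn reaches __spi_register_driver()
 * above with THIS_MODULE as @owner.
 */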

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
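
/*
 * Illustrative sketch (not part of this file): how an adapter driver might
 * use spi_alloc_device()/spi_add_device() directly, as described above.
 * "my_master", the "foo" modalias and the numbers are hypothetical
 * placeholders.
 *
 *	struct spi_device *spi;
 *	int status;
 *
 *	spi = spi_alloc_device(my_master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *
 *	status = spi_add_device(spi);
 *	if (status) {
 *		spi_dev_put(spi);
 *		return status;
 *	}
 *
 * On failure the reference taken by spi_alloc_device() is dropped with
 * spi_dev_put(); on success the device is owned by the driver core.
 */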

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
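
/*
 * Illustrative sketch (not part of this file): a board file declaring its
 * hard-wired SPI devices and handing them to the core, as described above.
 * The "foo"/"bar" modaliases and all numbers are hypothetical placeholders.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *		{
 *			.modalias	= "bar",
 *			.max_speed_hz	= 500000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 *
 * The table is copied, so __initdata storage is fine; embedded pointers such
 * as platform_data are copied as-is, as noted in the kernel-doc above.
 */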

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
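
/*
 * Illustrative sketch (not part of this file): a controller driver using the
 * core transfer_one() path typically starts the transfer, returns a positive
 * value to mean "still in flight", and later calls
 * spi_finalize_current_transfer() from its completion interrupt.  The
 * "foo_spi" names and the foo_spi_start_dma() helper are hypothetical
 * placeholders.
 *
 *	static int foo_spi_transfer_one(struct spi_master *master,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		struct foo_spi *hw = spi_master_get_devdata(master);
 *
 *		foo_spi_start_dma(hw, xfer);
 *		return 1;	(core waits on xfer_completion above)
 *	}
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */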
1093ffbbdd21SLinus Walleij */ 1094ef4d96ecSMark Brown static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1095ffbbdd21SLinus Walleij { 1096ffbbdd21SLinus Walleij unsigned long flags; 1097ffbbdd21SLinus Walleij bool was_busy = false; 1098ffbbdd21SLinus Walleij int ret; 1099ffbbdd21SLinus Walleij 1100983aee5dSMark Brown /* Lock queue */ 1101ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1102983aee5dSMark Brown 1103983aee5dSMark Brown /* Make sure we are not already running a message */ 1104983aee5dSMark Brown if (master->cur_msg) { 1105983aee5dSMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 1106983aee5dSMark Brown return; 1107983aee5dSMark Brown } 1108983aee5dSMark Brown 11090461a414SMark Brown /* If another context is idling the device then defer */ 11100461a414SMark Brown if (master->idling) { 11110461a414SMark Brown queue_kthread_work(&master->kworker, &master->pump_messages); 11120461a414SMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 11130461a414SMark Brown return; 11140461a414SMark Brown } 11150461a414SMark Brown 1116983aee5dSMark Brown /* Check if the queue is idle */ 1117ffbbdd21SLinus Walleij if (list_empty(&master->queue) || !master->running) { 1118b0b36b86SBryan Freed if (!master->busy) { 11199af4acc0SDan Carpenter spin_unlock_irqrestore(&master->queue_lock, flags); 1120ffbbdd21SLinus Walleij return; 1121ffbbdd21SLinus Walleij } 1122fc9e0f71SMark Brown 1123fc9e0f71SMark Brown /* Only do teardown in the thread */ 1124fc9e0f71SMark Brown if (!in_kthread) { 1125fc9e0f71SMark Brown queue_kthread_work(&master->kworker, 1126fc9e0f71SMark Brown &master->pump_messages); 1127ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1128fc9e0f71SMark Brown return; 1129fc9e0f71SMark Brown } 1130fc9e0f71SMark Brown 1131ffbbdd21SLinus Walleij master->busy = false; 11320461a414SMark Brown master->idling = true; 1133ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 11340461a414SMark Brown 11353a2eba9bSMark Brown kfree(master->dummy_rx); 11363a2eba9bSMark Brown master->dummy_rx = NULL; 11373a2eba9bSMark Brown kfree(master->dummy_tx); 11383a2eba9bSMark Brown master->dummy_tx = NULL; 1139b0b36b86SBryan Freed if (master->unprepare_transfer_hardware && 1140b0b36b86SBryan Freed master->unprepare_transfer_hardware(master)) 1141b0b36b86SBryan Freed dev_err(&master->dev, 1142b0b36b86SBryan Freed "failed to unprepare transfer hardware\n"); 114349834de2SMark Brown if (master->auto_runtime_pm) { 114449834de2SMark Brown pm_runtime_mark_last_busy(master->dev.parent); 114549834de2SMark Brown pm_runtime_put_autosuspend(master->dev.parent); 114649834de2SMark Brown } 114756ec1978SMark Brown trace_spi_master_idle(master); 1148ffbbdd21SLinus Walleij 11490461a414SMark Brown spin_lock_irqsave(&master->queue_lock, flags); 11500461a414SMark Brown master->idling = false; 1151ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1152ffbbdd21SLinus Walleij return; 1153ffbbdd21SLinus Walleij } 1154ffbbdd21SLinus Walleij 1155ffbbdd21SLinus Walleij /* Extract head of queue */ 1156ffbbdd21SLinus Walleij master->cur_msg = 1157a89e2d27SAxel Lin list_first_entry(&master->queue, struct spi_message, queue); 1158ffbbdd21SLinus Walleij 1159ffbbdd21SLinus Walleij list_del_init(&master->cur_msg->queue); 1160ffbbdd21SLinus Walleij if (master->busy) 1161ffbbdd21SLinus Walleij was_busy = true; 1162ffbbdd21SLinus Walleij else 1163ffbbdd21SLinus Walleij master->busy = true; 1164ffbbdd21SLinus Walleij 
spin_unlock_irqrestore(&master->queue_lock, flags); 1165ffbbdd21SLinus Walleij 1166ef4d96ecSMark Brown mutex_lock(&master->io_mutex); 1167ef4d96ecSMark Brown 116849834de2SMark Brown if (!was_busy && master->auto_runtime_pm) { 116949834de2SMark Brown ret = pm_runtime_get_sync(master->dev.parent); 117049834de2SMark Brown if (ret < 0) { 117149834de2SMark Brown dev_err(&master->dev, "Failed to power device: %d\n", 117249834de2SMark Brown ret); 117349834de2SMark Brown return; 117449834de2SMark Brown } 117549834de2SMark Brown } 117649834de2SMark Brown 117756ec1978SMark Brown if (!was_busy) 117856ec1978SMark Brown trace_spi_master_busy(master); 117956ec1978SMark Brown 11807dfd2bd7SShubhrajyoti D if (!was_busy && master->prepare_transfer_hardware) { 1181ffbbdd21SLinus Walleij ret = master->prepare_transfer_hardware(master); 1182ffbbdd21SLinus Walleij if (ret) { 1183ffbbdd21SLinus Walleij dev_err(&master->dev, 1184ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 118549834de2SMark Brown 118649834de2SMark Brown if (master->auto_runtime_pm) 118749834de2SMark Brown pm_runtime_put(master->dev.parent); 1188ffbbdd21SLinus Walleij return; 1189ffbbdd21SLinus Walleij } 1190ffbbdd21SLinus Walleij } 1191ffbbdd21SLinus Walleij 119256ec1978SMark Brown trace_spi_message_start(master->cur_msg); 119356ec1978SMark Brown 11942841a5fcSMark Brown if (master->prepare_message) { 11952841a5fcSMark Brown ret = master->prepare_message(master, master->cur_msg); 11962841a5fcSMark Brown if (ret) { 11972841a5fcSMark Brown dev_err(&master->dev, 11982841a5fcSMark Brown "failed to prepare message: %d\n", ret); 11992841a5fcSMark Brown master->cur_msg->status = ret; 12002841a5fcSMark Brown spi_finalize_current_message(master); 120149023d2eSJon Hunter goto out; 12022841a5fcSMark Brown } 12032841a5fcSMark Brown master->cur_msg_prepared = true; 12042841a5fcSMark Brown } 12052841a5fcSMark Brown 120699adef31SMark Brown ret = spi_map_msg(master, master->cur_msg); 120799adef31SMark Brown if (ret) { 120899adef31SMark Brown master->cur_msg->status = ret; 120999adef31SMark Brown spi_finalize_current_message(master); 121049023d2eSJon Hunter goto out; 121199adef31SMark Brown } 121299adef31SMark Brown 1213ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 1214ffbbdd21SLinus Walleij if (ret) { 1215ffbbdd21SLinus Walleij dev_err(&master->dev, 12161f802f82SGeert Uytterhoeven "failed to transfer one message from queue\n"); 121749023d2eSJon Hunter goto out; 1218ffbbdd21SLinus Walleij } 121949023d2eSJon Hunter 122049023d2eSJon Hunter out: 1221ef4d96ecSMark Brown mutex_unlock(&master->io_mutex); 122262826970SMark Brown 122362826970SMark Brown /* Prod the scheduler in case transfer_one() was busy waiting */ 122449023d2eSJon Hunter if (!ret) 122562826970SMark Brown cond_resched(); 1226ffbbdd21SLinus Walleij } 1227ffbbdd21SLinus Walleij 1228fc9e0f71SMark Brown /** 1229fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 1230fc9e0f71SMark Brown * @work: pointer to kthread work struct contained in the master struct 1231fc9e0f71SMark Brown */ 1232fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1233fc9e0f71SMark Brown { 1234fc9e0f71SMark Brown struct spi_master *master = 1235fc9e0f71SMark Brown container_of(work, struct spi_master, pump_messages); 1236fc9e0f71SMark Brown 1237ef4d96ecSMark Brown __spi_pump_messages(master, true); 1238fc9e0f71SMark Brown } 1239fc9e0f71SMark Brown 1240ffbbdd21SLinus Walleij static int spi_init_queue(struct 
spi_master *master) 1241ffbbdd21SLinus Walleij { 1242ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1243ffbbdd21SLinus Walleij 1244ffbbdd21SLinus Walleij master->running = false; 1245ffbbdd21SLinus Walleij master->busy = false; 1246ffbbdd21SLinus Walleij 1247ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 1248ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 1249f170168bSKees Cook &master->kworker, "%s", 1250ffbbdd21SLinus Walleij dev_name(&master->dev)); 1251ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 1252ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 125398a8f5a0SJarkko Nikula return PTR_ERR(master->kworker_task); 1254ffbbdd21SLinus Walleij } 1255ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 1256ffbbdd21SLinus Walleij 1257ffbbdd21SLinus Walleij /* 1258ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 1259ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1260ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1261ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 1262ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 1263ffbbdd21SLinus Walleij */ 1264ffbbdd21SLinus Walleij if (master->rt) { 1265ffbbdd21SLinus Walleij dev_info(&master->dev, 1266ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 1267ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1268ffbbdd21SLinus Walleij } 1269ffbbdd21SLinus Walleij 1270ffbbdd21SLinus Walleij return 0; 1271ffbbdd21SLinus Walleij } 1272ffbbdd21SLinus Walleij 1273ffbbdd21SLinus Walleij /** 1274ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1275ffbbdd21SLinus Walleij * messages 1276ffbbdd21SLinus Walleij * @master: the master to check for queued messages 1277ffbbdd21SLinus Walleij * 1278ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1279ffbbdd21SLinus Walleij * this call. 128097d56dc6SJavier Martinez Canillas * 128197d56dc6SJavier Martinez Canillas * Return: the next message in the queue, else NULL if the queue is empty. 
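 *
 * Illustrative sketch (not taken from this file; drv_can_chain() is a
 * hypothetical helper): a controller driver may peek at the queue, for
 * example to decide whether to keep chip select asserted between
 * back-to-back messages:
 *
 *	struct spi_message *next = spi_get_next_queued_message(master);
 *
 *	if (next && drv_can_chain(master->cur_msg, next))
 *		keep_cs = true;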
1282ffbbdd21SLinus Walleij */ 1283ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1284ffbbdd21SLinus Walleij { 1285ffbbdd21SLinus Walleij struct spi_message *next; 1286ffbbdd21SLinus Walleij unsigned long flags; 1287ffbbdd21SLinus Walleij 1288ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 1289ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 12901cfd97f9SAxel Lin next = list_first_entry_or_null(&master->queue, struct spi_message, 12911cfd97f9SAxel Lin queue); 1292ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1293ffbbdd21SLinus Walleij 1294ffbbdd21SLinus Walleij return next; 1295ffbbdd21SLinus Walleij } 1296ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1297ffbbdd21SLinus Walleij 1298ffbbdd21SLinus Walleij /** 1299ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 1300ffbbdd21SLinus Walleij * @master: the master to return the message to 1301ffbbdd21SLinus Walleij * 1302ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 1303ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue. 1304ffbbdd21SLinus Walleij */ 1305ffbbdd21SLinus Walleij void spi_finalize_current_message(struct spi_master *master) 1306ffbbdd21SLinus Walleij { 1307ffbbdd21SLinus Walleij struct spi_message *mesg; 1308ffbbdd21SLinus Walleij unsigned long flags; 13092841a5fcSMark Brown int ret; 1310ffbbdd21SLinus Walleij 1311ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1312ffbbdd21SLinus Walleij mesg = master->cur_msg; 1313ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1314ffbbdd21SLinus Walleij 131599adef31SMark Brown spi_unmap_msg(master, mesg); 131699adef31SMark Brown 13172841a5fcSMark Brown if (master->cur_msg_prepared && master->unprepare_message) { 13182841a5fcSMark Brown ret = master->unprepare_message(master, mesg); 13192841a5fcSMark Brown if (ret) { 13202841a5fcSMark Brown dev_err(&master->dev, 13212841a5fcSMark Brown "failed to unprepare message: %d\n", ret); 13222841a5fcSMark Brown } 13232841a5fcSMark Brown } 1324391949b6SUwe Kleine-König 13258e76ef88SMartin Sperl spin_lock_irqsave(&master->queue_lock, flags); 13268e76ef88SMartin Sperl master->cur_msg = NULL; 13272841a5fcSMark Brown master->cur_msg_prepared = false; 13288e76ef88SMartin Sperl queue_kthread_work(&master->kworker, &master->pump_messages); 13298e76ef88SMartin Sperl spin_unlock_irqrestore(&master->queue_lock, flags); 13308e76ef88SMartin Sperl 13318e76ef88SMartin Sperl trace_spi_message_done(mesg); 13322841a5fcSMark Brown 1333ffbbdd21SLinus Walleij mesg->state = NULL; 1334ffbbdd21SLinus Walleij if (mesg->complete) 1335ffbbdd21SLinus Walleij mesg->complete(mesg->context); 1336ffbbdd21SLinus Walleij } 1337ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1338ffbbdd21SLinus Walleij 1339ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master) 1340ffbbdd21SLinus Walleij { 1341ffbbdd21SLinus Walleij unsigned long flags; 1342ffbbdd21SLinus Walleij 1343ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1344ffbbdd21SLinus Walleij 1345ffbbdd21SLinus Walleij if (master->running || master->busy) { 1346ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1347ffbbdd21SLinus Walleij return -EBUSY; 1348ffbbdd21SLinus Walleij } 1349ffbbdd21SLinus Walleij 1350ffbbdd21SLinus Walleij 
master->running = true; 1351ffbbdd21SLinus Walleij master->cur_msg = NULL; 1352ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1353ffbbdd21SLinus Walleij 1354ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1355ffbbdd21SLinus Walleij 1356ffbbdd21SLinus Walleij return 0; 1357ffbbdd21SLinus Walleij } 1358ffbbdd21SLinus Walleij 1359ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 1360ffbbdd21SLinus Walleij { 1361ffbbdd21SLinus Walleij unsigned long flags; 1362ffbbdd21SLinus Walleij unsigned limit = 500; 1363ffbbdd21SLinus Walleij int ret = 0; 1364ffbbdd21SLinus Walleij 1365ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1366ffbbdd21SLinus Walleij 1367ffbbdd21SLinus Walleij /* 1368ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 1369ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 1370ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1371ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 1372ffbbdd21SLinus Walleij */ 1373ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 1374ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1375f97b26b0SAxel Lin usleep_range(10000, 11000); 1376ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1377ffbbdd21SLinus Walleij } 1378ffbbdd21SLinus Walleij 1379ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 1380ffbbdd21SLinus Walleij ret = -EBUSY; 1381ffbbdd21SLinus Walleij else 1382ffbbdd21SLinus Walleij master->running = false; 1383ffbbdd21SLinus Walleij 1384ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1385ffbbdd21SLinus Walleij 1386ffbbdd21SLinus Walleij if (ret) { 1387ffbbdd21SLinus Walleij dev_warn(&master->dev, 1388ffbbdd21SLinus Walleij "could not stop message queue\n"); 1389ffbbdd21SLinus Walleij return ret; 1390ffbbdd21SLinus Walleij } 1391ffbbdd21SLinus Walleij return ret; 1392ffbbdd21SLinus Walleij } 1393ffbbdd21SLinus Walleij 1394ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 1395ffbbdd21SLinus Walleij { 1396ffbbdd21SLinus Walleij int ret; 1397ffbbdd21SLinus Walleij 1398ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1399ffbbdd21SLinus Walleij 1400ffbbdd21SLinus Walleij /* 1401ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 1402ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 1403ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 1404ffbbdd21SLinus Walleij * return anyway. 
1405ffbbdd21SLinus Walleij */ 1406ffbbdd21SLinus Walleij if (ret) { 1407ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 1408ffbbdd21SLinus Walleij return ret; 1409ffbbdd21SLinus Walleij } 1410ffbbdd21SLinus Walleij 1411ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 1412ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 1413ffbbdd21SLinus Walleij 1414ffbbdd21SLinus Walleij return 0; 1415ffbbdd21SLinus Walleij } 1416ffbbdd21SLinus Walleij 14170461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi, 14180461a414SMark Brown struct spi_message *msg, 14190461a414SMark Brown bool need_pump) 1420ffbbdd21SLinus Walleij { 1421ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 1422ffbbdd21SLinus Walleij unsigned long flags; 1423ffbbdd21SLinus Walleij 1424ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1425ffbbdd21SLinus Walleij 1426ffbbdd21SLinus Walleij if (!master->running) { 1427ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1428ffbbdd21SLinus Walleij return -ESHUTDOWN; 1429ffbbdd21SLinus Walleij } 1430ffbbdd21SLinus Walleij msg->actual_length = 0; 1431ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 1432ffbbdd21SLinus Walleij 1433ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 14340461a414SMark Brown if (!master->busy && need_pump) 1435ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1436ffbbdd21SLinus Walleij 1437ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1438ffbbdd21SLinus Walleij return 0; 1439ffbbdd21SLinus Walleij } 1440ffbbdd21SLinus Walleij 14410461a414SMark Brown /** 14420461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers 14430461a414SMark Brown * @spi: spi device which is requesting transfer 14440461a414SMark Brown * @msg: spi message which is to handled is queued to driver queue 144597d56dc6SJavier Martinez Canillas * 144697d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
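 *
 * Rough call flow for the queued path, as wired up in this file:
 * spi_master_initialize_queue() points master->transfer at this function,
 * so messages submitted by protocol drivers end up in __spi_queued_transfer(),
 * which appends them to master->queue and, unless the controller is already
 * busy, kicks the kworker that runs spi_pump_messages().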
14470461a414SMark Brown */ 14480461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 14490461a414SMark Brown { 14500461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 14510461a414SMark Brown } 14520461a414SMark Brown 1453ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 1454ffbbdd21SLinus Walleij { 1455ffbbdd21SLinus Walleij int ret; 1456ffbbdd21SLinus Walleij 1457ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 1458b158935fSMark Brown if (!master->transfer_one_message) 1459b158935fSMark Brown master->transfer_one_message = spi_transfer_one_message; 1460ffbbdd21SLinus Walleij 1461ffbbdd21SLinus Walleij /* Initialize and start queue */ 1462ffbbdd21SLinus Walleij ret = spi_init_queue(master); 1463ffbbdd21SLinus Walleij if (ret) { 1464ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 1465ffbbdd21SLinus Walleij goto err_init_queue; 1466ffbbdd21SLinus Walleij } 1467c3676d5cSMark Brown master->queued = true; 1468ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1469ffbbdd21SLinus Walleij if (ret) { 1470ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 1471ffbbdd21SLinus Walleij goto err_start_queue; 1472ffbbdd21SLinus Walleij } 1473ffbbdd21SLinus Walleij 1474ffbbdd21SLinus Walleij return 0; 1475ffbbdd21SLinus Walleij 1476ffbbdd21SLinus Walleij err_start_queue: 1477ffbbdd21SLinus Walleij spi_destroy_queue(master); 1478c3676d5cSMark Brown err_init_queue: 1479ffbbdd21SLinus Walleij return ret; 1480ffbbdd21SLinus Walleij } 1481ffbbdd21SLinus Walleij 1482ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1483ffbbdd21SLinus Walleij 14847cb94361SAndreas Larsson #if defined(CONFIG_OF) 1485aff5e3f8SPantelis Antoniou static struct spi_device * 1486aff5e3f8SPantelis Antoniou of_register_spi_device(struct spi_master *master, struct device_node *nc) 1487d57a4282SGrant Likely { 1488d57a4282SGrant Likely struct spi_device *spi; 1489d57a4282SGrant Likely int rc; 149089da4293STrent Piepho u32 value; 1491d57a4282SGrant Likely 1492d57a4282SGrant Likely /* Alloc an spi_device */ 1493d57a4282SGrant Likely spi = spi_alloc_device(master); 1494d57a4282SGrant Likely if (!spi) { 1495d57a4282SGrant Likely dev_err(&master->dev, "spi_device alloc error for %s\n", 1496d57a4282SGrant Likely nc->full_name); 1497aff5e3f8SPantelis Antoniou rc = -ENOMEM; 1498aff5e3f8SPantelis Antoniou goto err_out; 1499d57a4282SGrant Likely } 1500d57a4282SGrant Likely 1501d57a4282SGrant Likely /* Select device driver */ 1502aff5e3f8SPantelis Antoniou rc = of_modalias_node(nc, spi->modalias, 1503aff5e3f8SPantelis Antoniou sizeof(spi->modalias)); 1504aff5e3f8SPantelis Antoniou if (rc < 0) { 1505d57a4282SGrant Likely dev_err(&master->dev, "cannot find modalias for %s\n", 1506d57a4282SGrant Likely nc->full_name); 1507aff5e3f8SPantelis Antoniou goto err_out; 1508d57a4282SGrant Likely } 1509d57a4282SGrant Likely 1510d57a4282SGrant Likely /* Device address */ 151189da4293STrent Piepho rc = of_property_read_u32(nc, "reg", &value); 151289da4293STrent Piepho if (rc) { 151389da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 151489da4293STrent Piepho nc->full_name, rc); 1515aff5e3f8SPantelis Antoniou goto err_out; 1516d57a4282SGrant Likely } 151789da4293STrent Piepho spi->chip_select = value; 1518d57a4282SGrant Likely 1519d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) 
*/ 1520d57a4282SGrant Likely if (of_find_property(nc, "spi-cpha", NULL)) 1521d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1522d57a4282SGrant Likely if (of_find_property(nc, "spi-cpol", NULL)) 1523d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1524d57a4282SGrant Likely if (of_find_property(nc, "spi-cs-high", NULL)) 1525d57a4282SGrant Likely spi->mode |= SPI_CS_HIGH; 1526c20151dfSLars-Peter Clausen if (of_find_property(nc, "spi-3wire", NULL)) 1527c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1528cd6339e6SZhao Qiang if (of_find_property(nc, "spi-lsb-first", NULL)) 1529cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 1530d57a4282SGrant Likely 1531f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 153289da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 153389da4293STrent Piepho switch (value) { 153489da4293STrent Piepho case 1: 1535f477b7fbSwangyuhang break; 153689da4293STrent Piepho case 2: 1537f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1538f477b7fbSwangyuhang break; 153989da4293STrent Piepho case 4: 1540f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1541f477b7fbSwangyuhang break; 1542f477b7fbSwangyuhang default: 154380874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1544a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 154589da4293STrent Piepho value); 154680874d8cSGeert Uytterhoeven break; 1547f477b7fbSwangyuhang } 1548a822e99cSMark Brown } 1549f477b7fbSwangyuhang 155089da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 155189da4293STrent Piepho switch (value) { 155289da4293STrent Piepho case 1: 1553f477b7fbSwangyuhang break; 155489da4293STrent Piepho case 2: 1555f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1556f477b7fbSwangyuhang break; 155789da4293STrent Piepho case 4: 1558f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1559f477b7fbSwangyuhang break; 1560f477b7fbSwangyuhang default: 156180874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1562a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 156389da4293STrent Piepho value); 156480874d8cSGeert Uytterhoeven break; 1565f477b7fbSwangyuhang } 1566a822e99cSMark Brown } 1567f477b7fbSwangyuhang 1568d57a4282SGrant Likely /* Device speed */ 156989da4293STrent Piepho rc = of_property_read_u32(nc, "spi-max-frequency", &value); 157089da4293STrent Piepho if (rc) { 157189da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 157289da4293STrent Piepho nc->full_name, rc); 1573aff5e3f8SPantelis Antoniou goto err_out; 1574d57a4282SGrant Likely } 157589da4293STrent Piepho spi->max_speed_hz = value; 1576d57a4282SGrant Likely 1577d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 1578d57a4282SGrant Likely of_node_get(nc); 1579d57a4282SGrant Likely spi->dev.of_node = nc; 1580d57a4282SGrant Likely 1581d57a4282SGrant Likely /* Register the new device */ 1582d57a4282SGrant Likely rc = spi_add_device(spi); 1583d57a4282SGrant Likely if (rc) { 1584d57a4282SGrant Likely dev_err(&master->dev, "spi_device register error %s\n", 1585d57a4282SGrant Likely nc->full_name); 1586aff5e3f8SPantelis Antoniou goto err_out; 1587d57a4282SGrant Likely } 1588d57a4282SGrant Likely 1589aff5e3f8SPantelis Antoniou return spi; 1590aff5e3f8SPantelis Antoniou 1591aff5e3f8SPantelis Antoniou err_out: 1592aff5e3f8SPantelis Antoniou spi_dev_put(spi); 1593aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 1594aff5e3f8SPantelis Antoniou } 1595aff5e3f8SPantelis Antoniou 1596aff5e3f8SPantelis Antoniou /** 1597aff5e3f8SPantelis Antoniou * 
of_register_spi_devices() - Register child devices onto the SPI bus 1598aff5e3f8SPantelis Antoniou * @master: Pointer to spi_master device 1599aff5e3f8SPantelis Antoniou * 1600aff5e3f8SPantelis Antoniou * Registers an spi_device for each child node of master node which has a 'reg' 1601aff5e3f8SPantelis Antoniou * property. 1602aff5e3f8SPantelis Antoniou */ 1603aff5e3f8SPantelis Antoniou static void of_register_spi_devices(struct spi_master *master) 1604aff5e3f8SPantelis Antoniou { 1605aff5e3f8SPantelis Antoniou struct spi_device *spi; 1606aff5e3f8SPantelis Antoniou struct device_node *nc; 1607aff5e3f8SPantelis Antoniou 1608aff5e3f8SPantelis Antoniou if (!master->dev.of_node) 1609aff5e3f8SPantelis Antoniou return; 1610aff5e3f8SPantelis Antoniou 1611aff5e3f8SPantelis Antoniou for_each_available_child_of_node(master->dev.of_node, nc) { 1612bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1613bd6c1644SGeert Uytterhoeven continue; 1614aff5e3f8SPantelis Antoniou spi = of_register_spi_device(master, nc); 1615aff5e3f8SPantelis Antoniou if (IS_ERR(spi)) 1616aff5e3f8SPantelis Antoniou dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1617aff5e3f8SPantelis Antoniou nc->full_name); 1618d57a4282SGrant Likely } 1619d57a4282SGrant Likely } 1620d57a4282SGrant Likely #else 1621d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) { } 1622d57a4282SGrant Likely #endif 1623d57a4282SGrant Likely 162464bee4d2SMika Westerberg #ifdef CONFIG_ACPI 162564bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 162664bee4d2SMika Westerberg { 162764bee4d2SMika Westerberg struct spi_device *spi = data; 1628a0a90718SMika Westerberg struct spi_master *master = spi->master; 162964bee4d2SMika Westerberg 163064bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 163164bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 163264bee4d2SMika Westerberg 163364bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 163464bee4d2SMika Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1635a0a90718SMika Westerberg /* 1636a0a90718SMika Westerberg * ACPI DeviceSelection numbering is handled by the 1637a0a90718SMika Westerberg * host controller driver in Windows and can vary 1638a0a90718SMika Westerberg * from driver to driver. In Linux we always expect 1639a0a90718SMika Westerberg * 0 .. max - 1 so we need to ask the driver to 1640a0a90718SMika Westerberg * translate between the two schemes. 
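			 *
			 * A purely hypothetical example: a controller whose
			 * ACPI tables number chip selects starting at 1 could
			 * implement ->fw_translate_cs() as "return sel - 1;"
			 * (returning a negative errno for out-of-range values),
			 * so that Linux still sees 0 .. max - 1.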
1641a0a90718SMika Westerberg */ 1642a0a90718SMika Westerberg if (master->fw_translate_cs) { 1643a0a90718SMika Westerberg int cs = master->fw_translate_cs(master, 1644a0a90718SMika Westerberg sb->device_selection); 1645a0a90718SMika Westerberg if (cs < 0) 1646a0a90718SMika Westerberg return cs; 1647a0a90718SMika Westerberg spi->chip_select = cs; 1648a0a90718SMika Westerberg } else { 164964bee4d2SMika Westerberg spi->chip_select = sb->device_selection; 1650a0a90718SMika Westerberg } 1651a0a90718SMika Westerberg 165264bee4d2SMika Westerberg spi->max_speed_hz = sb->connection_speed; 165364bee4d2SMika Westerberg 165464bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 165564bee4d2SMika Westerberg spi->mode |= SPI_CPHA; 165664bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 165764bee4d2SMika Westerberg spi->mode |= SPI_CPOL; 165864bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 165964bee4d2SMika Westerberg spi->mode |= SPI_CS_HIGH; 166064bee4d2SMika Westerberg } 166164bee4d2SMika Westerberg } else if (spi->irq < 0) { 166264bee4d2SMika Westerberg struct resource r; 166364bee4d2SMika Westerberg 166464bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 166564bee4d2SMika Westerberg spi->irq = r.start; 166664bee4d2SMika Westerberg } 166764bee4d2SMika Westerberg 166864bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 166964bee4d2SMika Westerberg return 1; 167064bee4d2SMika Westerberg } 167164bee4d2SMika Westerberg 16727f24467fSOctavian Purdila static acpi_status acpi_register_spi_device(struct spi_master *master, 16737f24467fSOctavian Purdila struct acpi_device *adev) 167464bee4d2SMika Westerberg { 167564bee4d2SMika Westerberg struct list_head resource_list; 167664bee4d2SMika Westerberg struct spi_device *spi; 167764bee4d2SMika Westerberg int ret; 167864bee4d2SMika Westerberg 16797f24467fSOctavian Purdila if (acpi_bus_get_status(adev) || !adev->status.present || 16807f24467fSOctavian Purdila acpi_device_enumerated(adev)) 168164bee4d2SMika Westerberg return AE_OK; 168264bee4d2SMika Westerberg 168364bee4d2SMika Westerberg spi = spi_alloc_device(master); 168464bee4d2SMika Westerberg if (!spi) { 168564bee4d2SMika Westerberg dev_err(&master->dev, "failed to allocate SPI device for %s\n", 168664bee4d2SMika Westerberg dev_name(&adev->dev)); 168764bee4d2SMika Westerberg return AE_NO_MEMORY; 168864bee4d2SMika Westerberg } 168964bee4d2SMika Westerberg 16907b199811SRafael J. 
Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 169164bee4d2SMika Westerberg spi->irq = -1; 169264bee4d2SMika Westerberg 169364bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 169464bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 169564bee4d2SMika Westerberg acpi_spi_add_resource, spi); 169664bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 169764bee4d2SMika Westerberg 169864bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 169964bee4d2SMika Westerberg spi_dev_put(spi); 170064bee4d2SMika Westerberg return AE_OK; 170164bee4d2SMika Westerberg } 170264bee4d2SMika Westerberg 170333ada67dSChristophe RICARD if (spi->irq < 0) 170433ada67dSChristophe RICARD spi->irq = acpi_dev_gpio_irq_get(adev, 0); 170533ada67dSChristophe RICARD 17067f24467fSOctavian Purdila acpi_device_set_enumerated(adev); 17077f24467fSOctavian Purdila 170833cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 1709cf9eb39cSJarkko Nikula strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 171064bee4d2SMika Westerberg if (spi_add_device(spi)) { 171133cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 171264bee4d2SMika Westerberg dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 171364bee4d2SMika Westerberg dev_name(&adev->dev)); 171464bee4d2SMika Westerberg spi_dev_put(spi); 171564bee4d2SMika Westerberg } 171664bee4d2SMika Westerberg 171764bee4d2SMika Westerberg return AE_OK; 171864bee4d2SMika Westerberg } 171964bee4d2SMika Westerberg 17207f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 17217f24467fSOctavian Purdila void *data, void **return_value) 17227f24467fSOctavian Purdila { 17237f24467fSOctavian Purdila struct spi_master *master = data; 17247f24467fSOctavian Purdila struct acpi_device *adev; 17257f24467fSOctavian Purdila 17267f24467fSOctavian Purdila if (acpi_bus_get_device(handle, &adev)) 17277f24467fSOctavian Purdila return AE_OK; 17287f24467fSOctavian Purdila 17297f24467fSOctavian Purdila return acpi_register_spi_device(master, adev); 17307f24467fSOctavian Purdila } 17317f24467fSOctavian Purdila 173264bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master) 173364bee4d2SMika Westerberg { 173464bee4d2SMika Westerberg acpi_status status; 173564bee4d2SMika Westerberg acpi_handle handle; 173664bee4d2SMika Westerberg 173729896178SRafael J. 
Wysocki handle = ACPI_HANDLE(master->dev.parent); 173864bee4d2SMika Westerberg if (!handle) 173964bee4d2SMika Westerberg return; 174064bee4d2SMika Westerberg 174164bee4d2SMika Westerberg status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 174264bee4d2SMika Westerberg acpi_spi_add_device, NULL, 174364bee4d2SMika Westerberg master, NULL); 174464bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 174564bee4d2SMika Westerberg dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 174664bee4d2SMika Westerberg } 174764bee4d2SMika Westerberg #else 174864bee4d2SMika Westerberg static inline void acpi_register_spi_devices(struct spi_master *master) {} 174964bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 175064bee4d2SMika Westerberg 175149dce689STony Jones static void spi_master_release(struct device *dev) 17528ae12a0dSDavid Brownell { 17538ae12a0dSDavid Brownell struct spi_master *master; 17548ae12a0dSDavid Brownell 175549dce689STony Jones master = container_of(dev, struct spi_master, dev); 17568ae12a0dSDavid Brownell kfree(master); 17578ae12a0dSDavid Brownell } 17588ae12a0dSDavid Brownell 17598ae12a0dSDavid Brownell static struct class spi_master_class = { 17608ae12a0dSDavid Brownell .name = "spi_master", 17618ae12a0dSDavid Brownell .owner = THIS_MODULE, 176249dce689STony Jones .dev_release = spi_master_release, 1763eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 17648ae12a0dSDavid Brownell }; 17658ae12a0dSDavid Brownell 17668ae12a0dSDavid Brownell 17678ae12a0dSDavid Brownell /** 17688ae12a0dSDavid Brownell * spi_alloc_master - allocate SPI master controller 17698ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 177033e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 177149dce689STony Jones * memory is in the driver_data field of the returned device, 17720c868461SDavid Brownell * accessible with spi_master_get_devdata(). 177333e34dc6SDavid Brownell * Context: can sleep 17748ae12a0dSDavid Brownell * 17758ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 17768ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 1777ba1a0513Sdmitry pervushin * an spi_master structure, prior to calling spi_register_master(). 17788ae12a0dSDavid Brownell * 177997d56dc6SJavier Martinez Canillas * This must be called from context that can sleep. 17808ae12a0dSDavid Brownell * 17818ae12a0dSDavid Brownell * The caller is responsible for assigning the bus number and initializing 1782ba1a0513Sdmitry pervushin * the master's methods before calling spi_register_master(); and (after errors 1783a394d635SGuenter Roeck * adding the device) calling spi_master_put() to prevent a memory leak. 178497d56dc6SJavier Martinez Canillas * 178597d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 
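 *
 * Minimal probe() sketch (the "foo_priv" and "pdev" names are illustrative,
 * not part of this API):
 *
 *	struct spi_master *master;
 *	struct foo_priv *priv;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	master->num_chipselect = 4;	/* board specific */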
17868ae12a0dSDavid Brownell */ 1787e9d5a461SAdrian Bunk struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 17888ae12a0dSDavid Brownell { 17898ae12a0dSDavid Brownell struct spi_master *master; 17908ae12a0dSDavid Brownell 17910c868461SDavid Brownell if (!dev) 17920c868461SDavid Brownell return NULL; 17930c868461SDavid Brownell 17945fe5f05eSJingoo Han master = kzalloc(size + sizeof(*master), GFP_KERNEL); 17958ae12a0dSDavid Brownell if (!master) 17968ae12a0dSDavid Brownell return NULL; 17978ae12a0dSDavid Brownell 179849dce689STony Jones device_initialize(&master->dev); 17991e8a52e1SGrant Likely master->bus_num = -1; 18001e8a52e1SGrant Likely master->num_chipselect = 1; 180149dce689STony Jones master->dev.class = &spi_master_class; 1802157f38f9SJohan Hovold master->dev.parent = dev; 1803d7e2ee25SLinus Walleij pm_suspend_ignore_children(&master->dev, true); 18040c868461SDavid Brownell spi_master_set_devdata(master, &master[1]); 18058ae12a0dSDavid Brownell 18068ae12a0dSDavid Brownell return master; 18078ae12a0dSDavid Brownell } 18088ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_alloc_master); 18098ae12a0dSDavid Brownell 181074317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 181174317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 181274317984SJean-Christophe PLAGNIOL-VILLARD { 1813e80beb27SGrant Likely int nb, i, *cs; 181474317984SJean-Christophe PLAGNIOL-VILLARD struct device_node *np = master->dev.of_node; 181574317984SJean-Christophe PLAGNIOL-VILLARD 181674317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 181774317984SJean-Christophe PLAGNIOL-VILLARD return 0; 181874317984SJean-Christophe PLAGNIOL-VILLARD 181974317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 18205fe5f05eSJingoo Han master->num_chipselect = max_t(int, nb, master->num_chipselect); 182174317984SJean-Christophe PLAGNIOL-VILLARD 18228ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 18238ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 182474317984SJean-Christophe PLAGNIOL-VILLARD return 0; 18258ec5d84eSAndreas Larsson else if (nb < 0) 18268ec5d84eSAndreas Larsson return nb; 182774317984SJean-Christophe PLAGNIOL-VILLARD 182874317984SJean-Christophe PLAGNIOL-VILLARD cs = devm_kzalloc(&master->dev, 182974317984SJean-Christophe PLAGNIOL-VILLARD sizeof(int) * master->num_chipselect, 183074317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 183174317984SJean-Christophe PLAGNIOL-VILLARD master->cs_gpios = cs; 183274317984SJean-Christophe PLAGNIOL-VILLARD 183374317984SJean-Christophe PLAGNIOL-VILLARD if (!master->cs_gpios) 183474317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 183574317984SJean-Christophe PLAGNIOL-VILLARD 18360da83bb1SAndreas Larsson for (i = 0; i < master->num_chipselect; i++) 1837446411e1SAndreas Larsson cs[i] = -ENOENT; 183874317984SJean-Christophe PLAGNIOL-VILLARD 183974317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 184074317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = of_get_named_gpio(np, "cs-gpios", i); 184174317984SJean-Christophe PLAGNIOL-VILLARD 184274317984SJean-Christophe PLAGNIOL-VILLARD return 0; 184374317984SJean-Christophe PLAGNIOL-VILLARD } 184474317984SJean-Christophe PLAGNIOL-VILLARD #else 184574317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 184674317984SJean-Christophe PLAGNIOL-VILLARD { 184774317984SJean-Christophe PLAGNIOL-VILLARD return 0; 
184874317984SJean-Christophe PLAGNIOL-VILLARD } 184974317984SJean-Christophe PLAGNIOL-VILLARD #endif 185074317984SJean-Christophe PLAGNIOL-VILLARD 18518ae12a0dSDavid Brownell /** 18528ae12a0dSDavid Brownell * spi_register_master - register SPI master controller 18538ae12a0dSDavid Brownell * @master: initialized master, originally from spi_alloc_master() 185433e34dc6SDavid Brownell * Context: can sleep 18558ae12a0dSDavid Brownell * 18568ae12a0dSDavid Brownell * SPI master controllers connect to their drivers using some non-SPI bus, 18578ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 18588ae12a0dSDavid Brownell * includes calling spi_register_master() to hook up to this SPI bus glue. 18598ae12a0dSDavid Brownell * 18608ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 18618ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 18628ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 18638ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 18648ae12a0dSDavid Brownell * chip is at which address. 18658ae12a0dSDavid Brownell * 18668ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 18678ae12a0dSDavid Brownell * success, else a negative error code (dropping the master's refcount). 18680c868461SDavid Brownell * After a successful return, the caller is responsible for calling 18690c868461SDavid Brownell * spi_unregister_master(). 187097d56dc6SJavier Martinez Canillas * 187197d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 18728ae12a0dSDavid Brownell */ 1873e9d5a461SAdrian Bunk int spi_register_master(struct spi_master *master) 18748ae12a0dSDavid Brownell { 1875e44a45aeSDavid Brownell static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 187649dce689STony Jones struct device *dev = master->dev.parent; 18772b9603a0SFeng Tang struct boardinfo *bi; 18788ae12a0dSDavid Brownell int status = -ENODEV; 18798ae12a0dSDavid Brownell int dynamic = 0; 18808ae12a0dSDavid Brownell 18810c868461SDavid Brownell if (!dev) 18820c868461SDavid Brownell return -ENODEV; 18830c868461SDavid Brownell 188474317984SJean-Christophe PLAGNIOL-VILLARD status = of_spi_register_master(master); 188574317984SJean-Christophe PLAGNIOL-VILLARD if (status) 188674317984SJean-Christophe PLAGNIOL-VILLARD return status; 188774317984SJean-Christophe PLAGNIOL-VILLARD 1888082c8cb4SDavid Brownell /* even if it's just one always-selected device, there must 1889082c8cb4SDavid Brownell * be at least one chipselect 1890082c8cb4SDavid Brownell */ 1891082c8cb4SDavid Brownell if (master->num_chipselect == 0) 1892082c8cb4SDavid Brownell return -EINVAL; 1893082c8cb4SDavid Brownell 1894bb29785eSGrant Likely if ((master->bus_num < 0) && master->dev.of_node) 1895bb29785eSGrant Likely master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1896bb29785eSGrant Likely 18978ae12a0dSDavid Brownell /* convention: dynamically assigned bus IDs count down from the max */ 1898a020ed75SDavid Brownell if (master->bus_num < 0) { 1899082c8cb4SDavid Brownell /* FIXME switch to an IDR based scheme, something like 1900082c8cb4SDavid Brownell * I2C now uses, so we can't run out of "dynamic" IDs 1901082c8cb4SDavid Brownell */ 19028ae12a0dSDavid Brownell master->bus_num = atomic_dec_return(&dyn_bus_id); 1903b885244eSDavid Brownell dynamic = 1; 19048ae12a0dSDavid Brownell } 
19058ae12a0dSDavid Brownell 19065424d43eSMark Brown INIT_LIST_HEAD(&master->queue); 19075424d43eSMark Brown spin_lock_init(&master->queue_lock); 1908cf32b71eSErnst Schwab spin_lock_init(&master->bus_lock_spinlock); 1909cf32b71eSErnst Schwab mutex_init(&master->bus_lock_mutex); 1910ef4d96ecSMark Brown mutex_init(&master->io_mutex); 1911cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1912b158935fSMark Brown init_completion(&master->xfer_completion); 19136ad45a27SMark Brown if (!master->max_dma_len) 19146ad45a27SMark Brown master->max_dma_len = INT_MAX; 1915cf32b71eSErnst Schwab 19168ae12a0dSDavid Brownell /* register the device, then userspace will see it. 19178ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 19188ae12a0dSDavid Brownell */ 191935f74fcaSKay Sievers dev_set_name(&master->dev, "spi%u", master->bus_num); 192049dce689STony Jones status = device_add(&master->dev); 1921b885244eSDavid Brownell if (status < 0) 19228ae12a0dSDavid Brownell goto done; 192335f74fcaSKay Sievers dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 19248ae12a0dSDavid Brownell dynamic ? " (dynamic)" : ""); 19258ae12a0dSDavid Brownell 1926ffbbdd21SLinus Walleij /* If we're using a queued driver, start the queue */ 1927ffbbdd21SLinus Walleij if (master->transfer) 1928ffbbdd21SLinus Walleij dev_info(dev, "master is unqueued, this is deprecated\n"); 1929ffbbdd21SLinus Walleij else { 1930ffbbdd21SLinus Walleij status = spi_master_initialize_queue(master); 1931ffbbdd21SLinus Walleij if (status) { 1932e93b0724SAxel Lin device_del(&master->dev); 1933ffbbdd21SLinus Walleij goto done; 1934ffbbdd21SLinus Walleij } 1935ffbbdd21SLinus Walleij } 1936eca2ebc7SMartin Sperl /* add statistics */ 1937eca2ebc7SMartin Sperl spin_lock_init(&master->statistics.lock); 1938ffbbdd21SLinus Walleij 19392b9603a0SFeng Tang mutex_lock(&board_lock); 19402b9603a0SFeng Tang list_add_tail(&master->list, &spi_master_list); 19412b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 19422b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 19432b9603a0SFeng Tang mutex_unlock(&board_lock); 19442b9603a0SFeng Tang 194564bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 194612b15e83SAnatolij Gustschin of_register_spi_devices(master); 194764bee4d2SMika Westerberg acpi_register_spi_devices(master); 19488ae12a0dSDavid Brownell done: 19498ae12a0dSDavid Brownell return status; 19508ae12a0dSDavid Brownell } 19518ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_master); 19528ae12a0dSDavid Brownell 1953666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 1954666d5b4cSMark Brown { 1955666d5b4cSMark Brown spi_unregister_master(*(struct spi_master **)res); 1956666d5b4cSMark Brown } 1957666d5b4cSMark Brown 1958666d5b4cSMark Brown /** 1959666d5b4cSMark Brown * devm_spi_register_master - register managed SPI master controller 1960666d5b4cSMark Brown * @dev: device managing SPI master 1961666d5b4cSMark Brown * @master: initialized master, originally from spi_alloc_master() 1962666d5b4cSMark Brown * Context: can sleep 1963666d5b4cSMark Brown * 1964666d5b4cSMark Brown * Register an SPI master as with spi_register_master(); the master will 1965666d5b4cSMark Brown * automatically be unregistered when @dev is unbound from its driver. 196697d56dc6SJavier Martinez Canillas * 196797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code.
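 *
 * Typical use from a controller driver's probe() (sketch; the driver's own
 * setup and error handling are omitted):
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	... fill in master->num_chipselect, ->transfer_one, etc. ...
 *	return devm_spi_register_master(&pdev->dev, master);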
1968666d5b4cSMark Brown */ 1969666d5b4cSMark Brown int devm_spi_register_master(struct device *dev, struct spi_master *master) 1970666d5b4cSMark Brown { 1971666d5b4cSMark Brown struct spi_master **ptr; 1972666d5b4cSMark Brown int ret; 1973666d5b4cSMark Brown 1974666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1975666d5b4cSMark Brown if (!ptr) 1976666d5b4cSMark Brown return -ENOMEM; 1977666d5b4cSMark Brown 1978666d5b4cSMark Brown ret = spi_register_master(master); 19794b92894eSStephen Warren if (!ret) { 1980666d5b4cSMark Brown *ptr = master; 1981666d5b4cSMark Brown devres_add(dev, ptr); 1982666d5b4cSMark Brown } else { 1983666d5b4cSMark Brown devres_free(ptr); 1984666d5b4cSMark Brown } 1985666d5b4cSMark Brown 1986666d5b4cSMark Brown return ret; 1987666d5b4cSMark Brown } 1988666d5b4cSMark Brown EXPORT_SYMBOL_GPL(devm_spi_register_master); 1989666d5b4cSMark Brown 199034860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 19918ae12a0dSDavid Brownell { 19920c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 19938ae12a0dSDavid Brownell return 0; 19948ae12a0dSDavid Brownell } 19958ae12a0dSDavid Brownell 19968ae12a0dSDavid Brownell /** 19978ae12a0dSDavid Brownell * spi_unregister_master - unregister SPI master controller 19988ae12a0dSDavid Brownell * @master: the master being unregistered 199933e34dc6SDavid Brownell * Context: can sleep 20008ae12a0dSDavid Brownell * 20018ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 20028ae12a0dSDavid Brownell * only ones directly touching chip registers. 20038ae12a0dSDavid Brownell * 20048ae12a0dSDavid Brownell * This must be called from context that can sleep. 20058ae12a0dSDavid Brownell */ 20068ae12a0dSDavid Brownell void spi_unregister_master(struct spi_master *master) 20078ae12a0dSDavid Brownell { 200889fc9a1aSJeff Garzik int dummy; 200989fc9a1aSJeff Garzik 2010ffbbdd21SLinus Walleij if (master->queued) { 2011ffbbdd21SLinus Walleij if (spi_destroy_queue(master)) 2012ffbbdd21SLinus Walleij dev_err(&master->dev, "queue remove failed\n"); 2013ffbbdd21SLinus Walleij } 2014ffbbdd21SLinus Walleij 20152b9603a0SFeng Tang mutex_lock(&board_lock); 20162b9603a0SFeng Tang list_del(&master->list); 20172b9603a0SFeng Tang mutex_unlock(&board_lock); 20182b9603a0SFeng Tang 201997dbf37dSSebastian Andrzej Siewior dummy = device_for_each_child(&master->dev, NULL, __unregister); 202049dce689STony Jones device_unregister(&master->dev); 20218ae12a0dSDavid Brownell } 20228ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_unregister_master); 20238ae12a0dSDavid Brownell 2024ffbbdd21SLinus Walleij int spi_master_suspend(struct spi_master *master) 2025ffbbdd21SLinus Walleij { 2026ffbbdd21SLinus Walleij int ret; 2027ffbbdd21SLinus Walleij 2028ffbbdd21SLinus Walleij /* Basically no-ops for non-queued masters */ 2029ffbbdd21SLinus Walleij if (!master->queued) 2030ffbbdd21SLinus Walleij return 0; 2031ffbbdd21SLinus Walleij 2032ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 2033ffbbdd21SLinus Walleij if (ret) 2034ffbbdd21SLinus Walleij dev_err(&master->dev, "queue stop failed\n"); 2035ffbbdd21SLinus Walleij 2036ffbbdd21SLinus Walleij return ret; 2037ffbbdd21SLinus Walleij } 2038ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_suspend); 2039ffbbdd21SLinus Walleij 2040ffbbdd21SLinus Walleij int spi_master_resume(struct spi_master *master) 2041ffbbdd21SLinus Walleij { 2042ffbbdd21SLinus Walleij int ret; 2043ffbbdd21SLinus Walleij 2044ffbbdd21SLinus Walleij if 
(!master->queued) 2045ffbbdd21SLinus Walleij return 0; 2046ffbbdd21SLinus Walleij 2047ffbbdd21SLinus Walleij ret = spi_start_queue(master); 2048ffbbdd21SLinus Walleij if (ret) 2049ffbbdd21SLinus Walleij dev_err(&master->dev, "queue restart failed\n"); 2050ffbbdd21SLinus Walleij 2051ffbbdd21SLinus Walleij return ret; 2052ffbbdd21SLinus Walleij } 2053ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_resume); 2054ffbbdd21SLinus Walleij 20559f3b795aSMichał Mirosław static int __spi_master_match(struct device *dev, const void *data) 20565ed2c832SDave Young { 20575ed2c832SDave Young struct spi_master *m; 20589f3b795aSMichał Mirosław const u16 *bus_num = data; 20595ed2c832SDave Young 20605ed2c832SDave Young m = container_of(dev, struct spi_master, dev); 20615ed2c832SDave Young return m->bus_num == *bus_num; 20625ed2c832SDave Young } 20635ed2c832SDave Young 20648ae12a0dSDavid Brownell /** 20658ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 20668ae12a0dSDavid Brownell * @bus_num: the master's bus number 206733e34dc6SDavid Brownell * Context: can sleep 20688ae12a0dSDavid Brownell * 20698ae12a0dSDavid Brownell * This call may be used with devices that are registered after 20708ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 20718ae12a0dSDavid Brownell * spi_master (which the caller must release), or NULL if there is 20728ae12a0dSDavid Brownell * no such master registered. 207397d56dc6SJavier Martinez Canillas * 207497d56dc6SJavier Martinez Canillas * Return: the SPI master structure on success, else NULL. 20758ae12a0dSDavid Brownell */ 20768ae12a0dSDavid Brownell struct spi_master *spi_busnum_to_master(u16 bus_num) 20778ae12a0dSDavid Brownell { 207849dce689STony Jones struct device *dev; 20791e9a51dcSAtsushi Nemoto struct spi_master *master = NULL; 20808ae12a0dSDavid Brownell 2081695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 20825ed2c832SDave Young __spi_master_match); 20835ed2c832SDave Young if (dev) 20845ed2c832SDave Young master = container_of(dev, struct spi_master, dev); 20855ed2c832SDave Young /* reference got in class_find_device */ 20861e9a51dcSAtsushi Nemoto return master; 20878ae12a0dSDavid Brownell } 20888ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 20898ae12a0dSDavid Brownell 2090d780c371SMartin Sperl /*-------------------------------------------------------------------------*/ 2091d780c371SMartin Sperl 2092d780c371SMartin Sperl /* Core methods for SPI resource management */ 2093d780c371SMartin Sperl 2094d780c371SMartin Sperl /** 2095d780c371SMartin Sperl * spi_res_alloc - allocate a spi resource that is life-cycle managed 2096d780c371SMartin Sperl * during the processing of a spi_message while using 2097d780c371SMartin Sperl * spi_transfer_one 2098d780c371SMartin Sperl * @spi: the spi device for which we allocate memory 2099d780c371SMartin Sperl * @release: the release code to execute for this resource 2100d780c371SMartin Sperl * @size: size to alloc and return 2101d780c371SMartin Sperl * @gfp: GFP allocation flags 2102d780c371SMartin Sperl * 2103d780c371SMartin Sperl * Return: the pointer to the allocated data 2104d780c371SMartin Sperl * 2105d780c371SMartin Sperl * This may get enhanced in the future to allocate from a memory pool 2106d780c371SMartin Sperl * of the @spi_device or @spi_master to avoid repeated allocations. 
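 *
 * Sketch of a caller (foo_release and foo_state are made-up names): allocate
 * a resource tied to a message so that it is freed by spi_res_release() once
 * the message completes:
 *
 *	struct foo_state *st;
 *
 *	st = spi_res_alloc(msg->spi, foo_release, sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);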
2107d780c371SMartin Sperl */ 2108d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi, 2109d780c371SMartin Sperl spi_res_release_t release, 2110d780c371SMartin Sperl size_t size, gfp_t gfp) 2111d780c371SMartin Sperl { 2112d780c371SMartin Sperl struct spi_res *sres; 2113d780c371SMartin Sperl 2114d780c371SMartin Sperl sres = kzalloc(sizeof(*sres) + size, gfp); 2115d780c371SMartin Sperl if (!sres) 2116d780c371SMartin Sperl return NULL; 2117d780c371SMartin Sperl 2118d780c371SMartin Sperl INIT_LIST_HEAD(&sres->entry); 2119d780c371SMartin Sperl sres->release = release; 2120d780c371SMartin Sperl 2121d780c371SMartin Sperl return sres->data; 2122d780c371SMartin Sperl } 2123d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc); 2124d780c371SMartin Sperl 2125d780c371SMartin Sperl /** 2126d780c371SMartin Sperl * spi_res_free - free an spi resource 2127d780c371SMartin Sperl * @res: pointer to the custom data of a resource 2128d780c371SMartin Sperl * 2129d780c371SMartin Sperl */ 2130d780c371SMartin Sperl void spi_res_free(void *res) 2131d780c371SMartin Sperl { 2132d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2133d780c371SMartin Sperl 2134d780c371SMartin Sperl if (!res) 2135d780c371SMartin Sperl return; 2136d780c371SMartin Sperl 2137d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2138d780c371SMartin Sperl kfree(sres); 2139d780c371SMartin Sperl } 2140d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free); 2141d780c371SMartin Sperl 2142d780c371SMartin Sperl /** 2143d780c371SMartin Sperl * spi_res_add - add a spi_res to the spi_message 2144d780c371SMartin Sperl * @message: the spi message 2145d780c371SMartin Sperl * @res: the spi_resource 2146d780c371SMartin Sperl */ 2147d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res) 2148d780c371SMartin Sperl { 2149d780c371SMartin Sperl struct spi_res *sres = container_of(res, struct spi_res, data); 2150d780c371SMartin Sperl 2151d780c371SMartin Sperl WARN_ON(!list_empty(&sres->entry)); 2152d780c371SMartin Sperl list_add_tail(&sres->entry, &message->resources); 2153d780c371SMartin Sperl } 2154d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add); 2155d780c371SMartin Sperl 2156d780c371SMartin Sperl /** 2157d780c371SMartin Sperl * spi_res_release - release all spi resources for this message 2158d780c371SMartin Sperl * @master: the @spi_master 2159d780c371SMartin Sperl * @message: the @spi_message 2160d780c371SMartin Sperl */ 2161d780c371SMartin Sperl void spi_res_release(struct spi_master *master, 2162d780c371SMartin Sperl struct spi_message *message) 2163d780c371SMartin Sperl { 2164d780c371SMartin Sperl struct spi_res *res; 2165d780c371SMartin Sperl 2166d780c371SMartin Sperl while (!list_empty(&message->resources)) { 2167d780c371SMartin Sperl res = list_last_entry(&message->resources, 2168d780c371SMartin Sperl struct spi_res, entry); 2169d780c371SMartin Sperl 2170d780c371SMartin Sperl if (res->release) 2171d780c371SMartin Sperl res->release(master, message, res->data); 2172d780c371SMartin Sperl 2173d780c371SMartin Sperl list_del(&res->entry); 2174d780c371SMartin Sperl 2175d780c371SMartin Sperl kfree(res); 2176d780c371SMartin Sperl } 2177d780c371SMartin Sperl } 2178d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release); 21798ae12a0dSDavid Brownell 21808ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 21818ae12a0dSDavid Brownell 2182523baf5aSMartin Sperl /* Core methods for spi_message alterations */ 2183523baf5aSMartin Sperl 
2184523baf5aSMartin Sperl static void __spi_replace_transfers_release(struct spi_master *master, 2185523baf5aSMartin Sperl struct spi_message *msg, 2186523baf5aSMartin Sperl void *res) 2187523baf5aSMartin Sperl { 2188523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer = res; 2189523baf5aSMartin Sperl size_t i; 2190523baf5aSMartin Sperl 2191523baf5aSMartin Sperl /* call extra callback if requested */ 2192523baf5aSMartin Sperl if (rxfer->release) 2193523baf5aSMartin Sperl rxfer->release(master, msg, res); 2194523baf5aSMartin Sperl 2195523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2196523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2197523baf5aSMartin Sperl 2198523baf5aSMartin Sperl /* remove the formerly inserted entries */ 2199523baf5aSMartin Sperl for (i = 0; i < rxfer->inserted; i++) 2200523baf5aSMartin Sperl list_del(&rxfer->inserted_transfers[i].transfer_list); 2201523baf5aSMartin Sperl } 2202523baf5aSMartin Sperl 2203523baf5aSMartin Sperl /** 2204523baf5aSMartin Sperl * spi_replace_transfers - replace transfers with several transfers 2205523baf5aSMartin Sperl * and register change with spi_message.resources 2206523baf5aSMartin Sperl * @msg: the spi_message we work upon 2207523baf5aSMartin Sperl * @xfer_first: the first spi_transfer we want to replace 2208523baf5aSMartin Sperl * @remove: number of transfers to remove 2209523baf5aSMartin Sperl * @insert: the number of transfers we want to insert instead 2210523baf5aSMartin Sperl * @release: extra release code necessary in some circumstances 2211523baf5aSMartin Sperl * @extradatasize: extra data to allocate (with alignment guarantees 2212523baf5aSMartin Sperl * of struct @spi_transfer) 221305885397SMartin Sperl * @gfp: gfp flags 2214523baf5aSMartin Sperl * 2215523baf5aSMartin Sperl * Returns: pointer to @spi_replaced_transfers, 2216523baf5aSMartin Sperl * PTR_ERR(...) in case of errors. 
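 *
 * For a concrete in-tree caller see __spi_split_transfer_maxsize() below,
 * which replaces one oversized transfer with DIV_ROUND_UP(len, maxsize)
 * copies and then only fixes up the length and buffer pointers of the
 * inserted transfers.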
2217523baf5aSMartin Sperl */ 2218523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers( 2219523baf5aSMartin Sperl struct spi_message *msg, 2220523baf5aSMartin Sperl struct spi_transfer *xfer_first, 2221523baf5aSMartin Sperl size_t remove, 2222523baf5aSMartin Sperl size_t insert, 2223523baf5aSMartin Sperl spi_replaced_release_t release, 2224523baf5aSMartin Sperl size_t extradatasize, 2225523baf5aSMartin Sperl gfp_t gfp) 2226523baf5aSMartin Sperl { 2227523baf5aSMartin Sperl struct spi_replaced_transfers *rxfer; 2228523baf5aSMartin Sperl struct spi_transfer *xfer; 2229523baf5aSMartin Sperl size_t i; 2230523baf5aSMartin Sperl 2231523baf5aSMartin Sperl /* allocate the structure using spi_res */ 2232523baf5aSMartin Sperl rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2233523baf5aSMartin Sperl insert * sizeof(struct spi_transfer) 2234523baf5aSMartin Sperl + sizeof(struct spi_replaced_transfers) 2235523baf5aSMartin Sperl + extradatasize, 2236523baf5aSMartin Sperl gfp); 2237523baf5aSMartin Sperl if (!rxfer) 2238523baf5aSMartin Sperl return ERR_PTR(-ENOMEM); 2239523baf5aSMartin Sperl 2240523baf5aSMartin Sperl /* the release code to invoke before running the generic release */ 2241523baf5aSMartin Sperl rxfer->release = release; 2242523baf5aSMartin Sperl 2243523baf5aSMartin Sperl /* assign extradata */ 2244523baf5aSMartin Sperl if (extradatasize) 2245523baf5aSMartin Sperl rxfer->extradata = 2246523baf5aSMartin Sperl &rxfer->inserted_transfers[insert]; 2247523baf5aSMartin Sperl 2248523baf5aSMartin Sperl /* init the replaced_transfers list */ 2249523baf5aSMartin Sperl INIT_LIST_HEAD(&rxfer->replaced_transfers); 2250523baf5aSMartin Sperl 2251523baf5aSMartin Sperl /* assign the list_entry after which we should reinsert 2252523baf5aSMartin Sperl * the @replaced_transfers - it may be spi_message.messages! 
2253523baf5aSMartin Sperl */ 2254523baf5aSMartin Sperl rxfer->replaced_after = xfer_first->transfer_list.prev; 2255523baf5aSMartin Sperl 2256523baf5aSMartin Sperl /* remove the requested number of transfers */ 2257523baf5aSMartin Sperl for (i = 0; i < remove; i++) { 2258523baf5aSMartin Sperl /* if the entry after replaced_after it is msg->transfers 2259523baf5aSMartin Sperl * then we have been requested to remove more transfers 2260523baf5aSMartin Sperl * than are in the list 2261523baf5aSMartin Sperl */ 2262523baf5aSMartin Sperl if (rxfer->replaced_after->next == &msg->transfers) { 2263523baf5aSMartin Sperl dev_err(&msg->spi->dev, 2264523baf5aSMartin Sperl "requested to remove more spi_transfers than are available\n"); 2265523baf5aSMartin Sperl /* insert replaced transfers back into the message */ 2266523baf5aSMartin Sperl list_splice(&rxfer->replaced_transfers, 2267523baf5aSMartin Sperl rxfer->replaced_after); 2268523baf5aSMartin Sperl 2269523baf5aSMartin Sperl /* free the spi_replace_transfer structure */ 2270523baf5aSMartin Sperl spi_res_free(rxfer); 2271523baf5aSMartin Sperl 2272523baf5aSMartin Sperl /* and return with an error */ 2273523baf5aSMartin Sperl return ERR_PTR(-EINVAL); 2274523baf5aSMartin Sperl } 2275523baf5aSMartin Sperl 2276523baf5aSMartin Sperl /* remove the entry after replaced_after from list of 2277523baf5aSMartin Sperl * transfers and add it to list of replaced_transfers 2278523baf5aSMartin Sperl */ 2279523baf5aSMartin Sperl list_move_tail(rxfer->replaced_after->next, 2280523baf5aSMartin Sperl &rxfer->replaced_transfers); 2281523baf5aSMartin Sperl } 2282523baf5aSMartin Sperl 2283523baf5aSMartin Sperl /* create copy of the given xfer with identical settings 2284523baf5aSMartin Sperl * based on the first transfer to get removed 2285523baf5aSMartin Sperl */ 2286523baf5aSMartin Sperl for (i = 0; i < insert; i++) { 2287523baf5aSMartin Sperl /* we need to run in reverse order */ 2288523baf5aSMartin Sperl xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2289523baf5aSMartin Sperl 2290523baf5aSMartin Sperl /* copy all spi_transfer data */ 2291523baf5aSMartin Sperl memcpy(xfer, xfer_first, sizeof(*xfer)); 2292523baf5aSMartin Sperl 2293523baf5aSMartin Sperl /* add to list */ 2294523baf5aSMartin Sperl list_add(&xfer->transfer_list, rxfer->replaced_after); 2295523baf5aSMartin Sperl 2296523baf5aSMartin Sperl /* clear cs_change and delay_usecs for all but the last */ 2297523baf5aSMartin Sperl if (i) { 2298523baf5aSMartin Sperl xfer->cs_change = false; 2299523baf5aSMartin Sperl xfer->delay_usecs = 0; 2300523baf5aSMartin Sperl } 2301523baf5aSMartin Sperl } 2302523baf5aSMartin Sperl 2303523baf5aSMartin Sperl /* set up inserted */ 2304523baf5aSMartin Sperl rxfer->inserted = insert; 2305523baf5aSMartin Sperl 2306523baf5aSMartin Sperl /* and register it with spi_res/spi_message */ 2307523baf5aSMartin Sperl spi_res_add(msg, rxfer); 2308523baf5aSMartin Sperl 2309523baf5aSMartin Sperl return rxfer; 2310523baf5aSMartin Sperl } 2311523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers); 2312523baf5aSMartin Sperl 231308933418SFabio Estevam static int __spi_split_transfer_maxsize(struct spi_master *master, 2314d9f12122SMartin Sperl struct spi_message *msg, 2315d9f12122SMartin Sperl struct spi_transfer **xferp, 2316d9f12122SMartin Sperl size_t maxsize, 2317d9f12122SMartin Sperl gfp_t gfp) 2318d9f12122SMartin Sperl { 2319d9f12122SMartin Sperl struct spi_transfer *xfer = *xferp, *xfers; 2320d9f12122SMartin Sperl struct spi_replaced_transfers *srt; 2321d9f12122SMartin Sperl size_t 
2322d9f12122SMartin Sperl size_t count, i;
2323d9f12122SMartin Sperl 
2324d9f12122SMartin Sperl /* warn once about the fact that we are splitting a transfer */
2325d9f12122SMartin Sperl dev_warn_once(&msg->spi->dev,
23267d62f51eSFabio Estevam "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
2327d9f12122SMartin Sperl xfer->len, maxsize);
2328d9f12122SMartin Sperl 
2329d9f12122SMartin Sperl /* calculate how many we have to replace */
2330d9f12122SMartin Sperl count = DIV_ROUND_UP(xfer->len, maxsize);
2331d9f12122SMartin Sperl 
2332d9f12122SMartin Sperl /* create replacement */
2333d9f12122SMartin Sperl srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2334657d32efSDan Carpenter if (IS_ERR(srt))
2335657d32efSDan Carpenter return PTR_ERR(srt);
2336d9f12122SMartin Sperl xfers = srt->inserted_transfers;
2337d9f12122SMartin Sperl 
2338d9f12122SMartin Sperl /* now handle each of those newly inserted spi_transfers
2339d9f12122SMartin Sperl * note that the replacement spi_transfers are all preset
2340d9f12122SMartin Sperl * to the same values as *xferp, so tx_buf, rx_buf and len
2341d9f12122SMartin Sperl * are all identical (as well as most others)
2342d9f12122SMartin Sperl * so we just have to fix up len and the pointers.
2343d9f12122SMartin Sperl *
2344d9f12122SMartin Sperl * this also includes support for the deprecated
2345d9f12122SMartin Sperl * spi_message.is_dma_mapped interface
2346d9f12122SMartin Sperl */
2347d9f12122SMartin Sperl 
2348d9f12122SMartin Sperl /* the first transfer just needs the length modified, so we
2349d9f12122SMartin Sperl * run it outside the loop
2350d9f12122SMartin Sperl */
2351c8dab77aSFabio Estevam xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2352d9f12122SMartin Sperl 
2353d9f12122SMartin Sperl /* all the others need rx_buf/tx_buf also set */
2354d9f12122SMartin Sperl for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2355d9f12122SMartin Sperl /* update rx_buf, tx_buf and dma */
2356d9f12122SMartin Sperl if (xfers[i].rx_buf)
2357d9f12122SMartin Sperl xfers[i].rx_buf += offset;
2358d9f12122SMartin Sperl if (xfers[i].rx_dma)
2359d9f12122SMartin Sperl xfers[i].rx_dma += offset;
2360d9f12122SMartin Sperl if (xfers[i].tx_buf)
2361d9f12122SMartin Sperl xfers[i].tx_buf += offset;
2362d9f12122SMartin Sperl if (xfers[i].tx_dma)
2363d9f12122SMartin Sperl xfers[i].tx_dma += offset;
2364d9f12122SMartin Sperl 
2365d9f12122SMartin Sperl /* update length */
2366d9f12122SMartin Sperl xfers[i].len = min(maxsize, xfers[i].len - offset);
2367d9f12122SMartin Sperl }
2368d9f12122SMartin Sperl 
2369d9f12122SMartin Sperl /* we set up xferp to the last entry we have inserted,
2370d9f12122SMartin Sperl * so that we skip those already split transfers
2371d9f12122SMartin Sperl */
2372d9f12122SMartin Sperl *xferp = &xfers[count - 1];
2373d9f12122SMartin Sperl 
2374d9f12122SMartin Sperl /* increment statistics counters */
2375d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2376d9f12122SMartin Sperl transfers_split_maxsize);
2377d9f12122SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2378d9f12122SMartin Sperl transfers_split_maxsize);
2379d9f12122SMartin Sperl 
2380d9f12122SMartin Sperl return 0;
2381d9f12122SMartin Sperl }
2382d9f12122SMartin Sperl 
2383d9f12122SMartin Sperl /**
2384d9f12122SMartin Sperl * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2385d9f12122SMartin Sperl * when an individual transfer exceeds a
2386d9f12122SMartin Sperl * certain size
2387d9f12122SMartin Sperl * @master: the @spi_master for this transfer
23883700ce95SMasanari Iida * @msg: the @spi_message to transform
23893700ce95SMasanari Iida * @maxsize: the maximum transfer size; transfers longer than this are split
239010f11a22SJavier Martinez Canillas * @gfp: GFP allocation flags
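 *
 * Illustrative sketch only (not taken from this file): a controller driver
 * whose DMA engine cannot handle segments larger than 64 KiB might call this
 * from a hypothetical foo_prepare_message() callback; the 65536 limit is
 * just an example value:
 *
 *	static int foo_prepare_message(struct spi_master *master,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg,
 *						   65536, GFP_KERNEL);
 *	}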
2391d9f12122SMartin Sperl *
2392d9f12122SMartin Sperl * Return: status of transformation
2393d9f12122SMartin Sperl */
2394d9f12122SMartin Sperl int spi_split_transfers_maxsize(struct spi_master *master,
2395d9f12122SMartin Sperl struct spi_message *msg,
2396d9f12122SMartin Sperl size_t maxsize,
2397d9f12122SMartin Sperl gfp_t gfp)
2398d9f12122SMartin Sperl {
2399d9f12122SMartin Sperl struct spi_transfer *xfer;
2400d9f12122SMartin Sperl int ret;
2401d9f12122SMartin Sperl 
2402d9f12122SMartin Sperl /* iterate over the transfer_list,
2403d9f12122SMartin Sperl * but note that xfer is advanced to the last transfer inserted
2404d9f12122SMartin Sperl * to avoid checking sizes again unnecessarily (also xfer may
2405d9f12122SMartin Sperl * potentially belong to a different list by the time the
2406d9f12122SMartin Sperl * replacement has happened)
2407d9f12122SMartin Sperl */
2408d9f12122SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2409d9f12122SMartin Sperl if (xfer->len > maxsize) {
2410d9f12122SMartin Sperl ret = __spi_split_transfer_maxsize(
2411d9f12122SMartin Sperl master, msg, &xfer, maxsize, gfp);
2412d9f12122SMartin Sperl if (ret)
2413d9f12122SMartin Sperl return ret;
2414d9f12122SMartin Sperl }
2415d9f12122SMartin Sperl }
2416d9f12122SMartin Sperl 
2417d9f12122SMartin Sperl return 0;
2418d9f12122SMartin Sperl }
2419d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
24208ae12a0dSDavid Brownell 
24218ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
24228ae12a0dSDavid Brownell 
24237d077197SDavid Brownell /* Core methods for SPI master protocol drivers. Some of the
24247d077197SDavid Brownell * other core methods are currently defined as inline functions.
24257d077197SDavid Brownell */
24267d077197SDavid Brownell 
242763ab645fSStefan Brüns static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
242863ab645fSStefan Brüns {
242963ab645fSStefan Brüns if (master->bits_per_word_mask) {
243063ab645fSStefan Brüns /* Only 32 bits fit in the mask */
243163ab645fSStefan Brüns if (bits_per_word > 32)
243263ab645fSStefan Brüns return -EINVAL;
243363ab645fSStefan Brüns if (!(master->bits_per_word_mask &
243463ab645fSStefan Brüns SPI_BPW_MASK(bits_per_word)))
243563ab645fSStefan Brüns return -EINVAL;
243663ab645fSStefan Brüns }
243763ab645fSStefan Brüns 
243863ab645fSStefan Brüns return 0;
243963ab645fSStefan Brüns }
244063ab645fSStefan Brüns 
24417d077197SDavid Brownell /**
24427d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate
24437d077197SDavid Brownell * @spi: the device whose settings are being modified
24447d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device
24457d077197SDavid Brownell *
24467d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the
24477d077197SDavid Brownell * device doesn't work with its default. They may likewise need
24487d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function
24497d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep.
24507d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
24517d077197SDavid Brownell * effect the next time the device is selected and data is transferred to
24527d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected.
24537d077197SDavid Brownell *
24547d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option
24557d077197SDavid Brownell * that the underlying controller or its driver does not support. For
24567d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words,
24577d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects.
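 *
 * A minimal usage sketch (illustrative only; the mode, word size and clock
 * rate below are arbitrary example values, not requirements):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;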
245897d56dc6SJavier Martinez Canillas *
245997d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code.
24607d077197SDavid Brownell */
24617d077197SDavid Brownell int spi_setup(struct spi_device *spi)
24627d077197SDavid Brownell {
246383596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits;
24645ab8d262SAndy Shevchenko int status;
24657d077197SDavid Brownell 
2466f477b7fbSwangyuhang /* check mode to prevent DUAL and QUAD from being set at the same time
2467f477b7fbSwangyuhang */
2468f477b7fbSwangyuhang if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2469f477b7fbSwangyuhang ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2470f477b7fbSwangyuhang dev_err(&spi->dev,
2471f477b7fbSwangyuhang "setup: cannot select dual and quad at the same time\n");
2472f477b7fbSwangyuhang return -EINVAL;
2473f477b7fbSwangyuhang }
2474f477b7fbSwangyuhang /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
2475f477b7fbSwangyuhang */
2476f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode &
2477f477b7fbSwangyuhang (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2478f477b7fbSwangyuhang return -EINVAL;
2479e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options
2480e7db06b5SDavid Brownell * that aren't supported with their current master
2481e7db06b5SDavid Brownell */
2482e7db06b5SDavid Brownell bad_bits = spi->mode & ~spi->master->mode_bits;
248383596fbeSGeert Uytterhoeven ugly_bits = bad_bits &
248483596fbeSGeert Uytterhoeven (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
248583596fbeSGeert Uytterhoeven if (ugly_bits) {
248683596fbeSGeert Uytterhoeven dev_warn(&spi->dev,
248783596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n",
248883596fbeSGeert Uytterhoeven ugly_bits);
248983596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits;
249083596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits;
249183596fbeSGeert Uytterhoeven }
2492e7db06b5SDavid Brownell if (bad_bits) {
2493eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2494e7db06b5SDavid Brownell bad_bits);
2495e7db06b5SDavid Brownell return -EINVAL;
2496e7db06b5SDavid Brownell }
2497e7db06b5SDavid Brownell 
24987d077197SDavid Brownell if (!spi->bits_per_word)
24997d077197SDavid Brownell spi->bits_per_word = 8;
25007d077197SDavid Brownell 
25015ab8d262SAndy Shevchenko status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
25025ab8d262SAndy Shevchenko if (status)
25035ab8d262SAndy Shevchenko return status;
250463ab645fSStefan Brüns 
2505052eb2d4SAxel Lin if (!spi->max_speed_hz)
2506052eb2d4SAxel Lin spi->max_speed_hz = spi->master->max_speed_hz;
2507052eb2d4SAxel Lin 
2508caae070cSLaxman Dewangan if (spi->master->setup)
25097d077197SDavid Brownell status = spi->master->setup(spi);
25107d077197SDavid Brownell 2511abeedb01SFranklin S Cooper Jr spi_set_cs(spi, false); 2512abeedb01SFranklin S Cooper Jr 25135fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 25147d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 25157d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 25167d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 25177d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 25187d077197SDavid Brownell (spi->mode & SPI_LOOP) ? "loopback, " : "", 25197d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 25207d077197SDavid Brownell status); 25217d077197SDavid Brownell 25227d077197SDavid Brownell return status; 25237d077197SDavid Brownell } 25247d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 25257d077197SDavid Brownell 252690808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2527cf32b71eSErnst Schwab { 2528cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2529e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 25306ea31293SAtsushi Nemoto int w_size; 2531cf32b71eSErnst Schwab 253224a0013aSMark Brown if (list_empty(&message->transfers)) 253324a0013aSMark Brown return -EINVAL; 253424a0013aSMark Brown 2535cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 2536cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 2537cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 2538cf32b71eSErnst Schwab * software limitations. 2539cf32b71eSErnst Schwab */ 2540cf32b71eSErnst Schwab if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2541cf32b71eSErnst Schwab || (spi->mode & SPI_3WIRE)) { 2542cf32b71eSErnst Schwab unsigned flags = master->flags; 2543cf32b71eSErnst Schwab 2544cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 2545cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 2546cf32b71eSErnst Schwab return -EINVAL; 2547cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2548cf32b71eSErnst Schwab return -EINVAL; 2549cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2550cf32b71eSErnst Schwab return -EINVAL; 2551cf32b71eSErnst Schwab } 2552cf32b71eSErnst Schwab } 2553cf32b71eSErnst Schwab 2554e6811d1dSLaxman Dewangan /** 2555059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 2556059b8ffeSLaxman Dewangan * it is not set for this transfer. 2557f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 2558f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
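 *
 * For example (illustrative sketch only, not part of this code), a protocol
 * driver talking to a device set up with SPI_TX_QUAD may request a quad-mode
 * write by filling in only:
 *
 *	xfer.tx_buf = buf;
 *	xfer.len = len;
 *	xfer.tx_nbits = SPI_NBITS_QUAD;
 *
 * and leaving bits_per_word and speed_hz at zero so that the device
 * defaults are applied below.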
2559e6811d1dSLaxman Dewangan */ 256077e80588SMartin Sperl message->frame_length = 0; 2561e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 2562078726ceSSourav Poddar message->frame_length += xfer->len; 2563e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 2564e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 2565a6f87fadSAxel Lin 2566a6f87fadSAxel Lin if (!xfer->speed_hz) 2567059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 25687dc9fbc3SMark Brown if (!xfer->speed_hz) 25697dc9fbc3SMark Brown xfer->speed_hz = master->max_speed_hz; 2570a6f87fadSAxel Lin 257156ede94aSGabor Juhos if (master->max_speed_hz && 257256ede94aSGabor Juhos xfer->speed_hz > master->max_speed_hz) 257356ede94aSGabor Juhos xfer->speed_hz = master->max_speed_hz; 257456ede94aSGabor Juhos 257563ab645fSStefan Brüns if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2576543bb255SStephen Warren return -EINVAL; 2577a2fd4f9fSMark Brown 25784d94bd21SIvan T. Ivanov /* 25794d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 25804d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 25814d94bd21SIvan T. Ivanov */ 25824d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 25834d94bd21SIvan T. Ivanov w_size = 1; 25844d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 25854d94bd21SIvan T. Ivanov w_size = 2; 25864d94bd21SIvan T. Ivanov else 25874d94bd21SIvan T. Ivanov w_size = 4; 25884d94bd21SIvan T. Ivanov 25894d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 25906ea31293SAtsushi Nemoto if (xfer->len % w_size) 25914d94bd21SIvan T. Ivanov return -EINVAL; 25924d94bd21SIvan T. Ivanov 2593a2fd4f9fSMark Brown if (xfer->speed_hz && master->min_speed_hz && 2594a2fd4f9fSMark Brown xfer->speed_hz < master->min_speed_hz) 2595a2fd4f9fSMark Brown return -EINVAL; 2596f477b7fbSwangyuhang 2597f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 2598f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 2599f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 2600f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 2601f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 26021afd9989SGeert Uytterhoeven * 1. check the value matches one of single, dual and quad 26031afd9989SGeert Uytterhoeven * 2. 
check tx/rx_nbits match the mode in spi_device 2604f477b7fbSwangyuhang */ 2605db90a441SSourav Poddar if (xfer->tx_buf) { 2606f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2607f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 2608f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 2609a2fd4f9fSMark Brown return -EINVAL; 2610f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2611f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2612f477b7fbSwangyuhang return -EINVAL; 2613f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2614f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 2615f477b7fbSwangyuhang return -EINVAL; 2616db90a441SSourav Poddar } 2617f477b7fbSwangyuhang /* check transfer rx_nbits */ 2618db90a441SSourav Poddar if (xfer->rx_buf) { 2619f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2620f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 2621f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 2622f477b7fbSwangyuhang return -EINVAL; 2623f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2624f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2625f477b7fbSwangyuhang return -EINVAL; 2626f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2627f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 2628f477b7fbSwangyuhang return -EINVAL; 2629e6811d1dSLaxman Dewangan } 2630e6811d1dSLaxman Dewangan } 2631e6811d1dSLaxman Dewangan 2632cf32b71eSErnst Schwab message->status = -EINPROGRESS; 263390808738SMark Brown 263490808738SMark Brown return 0; 263590808738SMark Brown } 263690808738SMark Brown 263790808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 263890808738SMark Brown { 263990808738SMark Brown struct spi_master *master = spi->master; 264090808738SMark Brown 264190808738SMark Brown message->spi = spi; 264290808738SMark Brown 2643eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2644eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2645eca2ebc7SMartin Sperl 264690808738SMark Brown trace_spi_message_submit(message); 264790808738SMark Brown 2648cf32b71eSErnst Schwab return master->transfer(spi, message); 2649cf32b71eSErnst Schwab } 2650cf32b71eSErnst Schwab 2651568d0697SDavid Brownell /** 2652568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 2653568d0697SDavid Brownell * @spi: device with which data will be exchanged 2654568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 2655568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 2656568d0697SDavid Brownell * 2657568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 2658568d0697SDavid Brownell * as well as from task contexts which can sleep. 2659568d0697SDavid Brownell * 2660568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 2661568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 2662568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 2663568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 2664568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 2665568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 2666568d0697SDavid Brownell * core or controller driver code. 
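 *
 * A minimal usage sketch (illustrative only; trig_done(), the buffer and the
 * completion variable are hypothetical, not part of this API):
 *
 *	static void trig_done(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	xfer.tx_buf = cmd;
 *	xfer.len = sizeof(cmd);
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = trig_done;
 *	msg.context = &done;
 *	status = spi_async(spi, &msg);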
2667568d0697SDavid Brownell * 2668568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 2669568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 2670568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 2671568d0697SDavid Brownell * time requirements, for example. 2672568d0697SDavid Brownell * 2673568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 2674568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 2675568d0697SDavid Brownell * Until returning from the associated message completion callback, 2676568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 2677568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 2678568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 267997d56dc6SJavier Martinez Canillas * 268097d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2681568d0697SDavid Brownell */ 2682568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 2683568d0697SDavid Brownell { 2684568d0697SDavid Brownell struct spi_master *master = spi->master; 2685cf32b71eSErnst Schwab int ret; 2686cf32b71eSErnst Schwab unsigned long flags; 2687568d0697SDavid Brownell 268890808738SMark Brown ret = __spi_validate(spi, message); 268990808738SMark Brown if (ret != 0) 269090808738SMark Brown return ret; 269190808738SMark Brown 2692cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2693568d0697SDavid Brownell 2694cf32b71eSErnst Schwab if (master->bus_lock_flag) 2695cf32b71eSErnst Schwab ret = -EBUSY; 2696cf32b71eSErnst Schwab else 2697cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2698568d0697SDavid Brownell 2699cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2700cf32b71eSErnst Schwab 2701cf32b71eSErnst Schwab return ret; 2702568d0697SDavid Brownell } 2703568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 2704568d0697SDavid Brownell 2705cf32b71eSErnst Schwab /** 2706cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 2707cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2708cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 2709cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 2710cf32b71eSErnst Schwab * 2711cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 2712cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 2713cf32b71eSErnst Schwab * 2714cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 2715cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 2716cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 2717cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 2718cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 2719cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 2720cf32b71eSErnst Schwab * core or controller driver code. 
2721cf32b71eSErnst Schwab * 2722cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 2723cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 2724cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 2725cf32b71eSErnst Schwab * time requirements, for example. 2726cf32b71eSErnst Schwab * 2727cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 2728cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 2729cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 2730cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 2731cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 2732cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 273397d56dc6SJavier Martinez Canillas * 273497d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2735cf32b71eSErnst Schwab */ 2736cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2737cf32b71eSErnst Schwab { 2738cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2739cf32b71eSErnst Schwab int ret; 2740cf32b71eSErnst Schwab unsigned long flags; 2741cf32b71eSErnst Schwab 274290808738SMark Brown ret = __spi_validate(spi, message); 274390808738SMark Brown if (ret != 0) 274490808738SMark Brown return ret; 274590808738SMark Brown 2746cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2747cf32b71eSErnst Schwab 2748cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2749cf32b71eSErnst Schwab 2750cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2751cf32b71eSErnst Schwab 2752cf32b71eSErnst Schwab return ret; 2753cf32b71eSErnst Schwab 2754cf32b71eSErnst Schwab } 2755cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 2756cf32b71eSErnst Schwab 27577d077197SDavid Brownell 2758556351f1SVignesh R int spi_flash_read(struct spi_device *spi, 2759556351f1SVignesh R struct spi_flash_read_message *msg) 2760556351f1SVignesh R 2761556351f1SVignesh R { 2762556351f1SVignesh R struct spi_master *master = spi->master; 2763f4502dd1SVignesh R struct device *rx_dev = NULL; 2764556351f1SVignesh R int ret; 2765556351f1SVignesh R 2766556351f1SVignesh R if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2767556351f1SVignesh R msg->addr_nbits == SPI_NBITS_DUAL) && 2768556351f1SVignesh R !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2769556351f1SVignesh R return -EINVAL; 2770556351f1SVignesh R if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2771556351f1SVignesh R msg->addr_nbits == SPI_NBITS_QUAD) && 2772556351f1SVignesh R !(spi->mode & SPI_TX_QUAD)) 2773556351f1SVignesh R return -EINVAL; 2774556351f1SVignesh R if (msg->data_nbits == SPI_NBITS_DUAL && 2775556351f1SVignesh R !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2776556351f1SVignesh R return -EINVAL; 2777556351f1SVignesh R if (msg->data_nbits == SPI_NBITS_QUAD && 2778556351f1SVignesh R !(spi->mode & SPI_RX_QUAD)) 2779556351f1SVignesh R return -EINVAL; 2780556351f1SVignesh R 2781556351f1SVignesh R if (master->auto_runtime_pm) { 2782556351f1SVignesh R ret = pm_runtime_get_sync(master->dev.parent); 2783556351f1SVignesh R if (ret < 0) { 2784556351f1SVignesh R dev_err(&master->dev, "Failed to power device: %d\n", 2785556351f1SVignesh R ret); 2786556351f1SVignesh R return ret; 2787556351f1SVignesh R } 
2788556351f1SVignesh R } 2789f4502dd1SVignesh R 2790556351f1SVignesh R mutex_lock(&master->bus_lock_mutex); 2791ef4d96ecSMark Brown mutex_lock(&master->io_mutex); 2792f4502dd1SVignesh R if (master->dma_rx) { 2793f4502dd1SVignesh R rx_dev = master->dma_rx->device->dev; 2794f4502dd1SVignesh R ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2795f4502dd1SVignesh R msg->buf, msg->len, 2796f4502dd1SVignesh R DMA_FROM_DEVICE); 2797f4502dd1SVignesh R if (!ret) 2798f4502dd1SVignesh R msg->cur_msg_mapped = true; 2799f4502dd1SVignesh R } 2800556351f1SVignesh R ret = master->spi_flash_read(spi, msg); 2801f4502dd1SVignesh R if (msg->cur_msg_mapped) 2802f4502dd1SVignesh R spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2803f4502dd1SVignesh R DMA_FROM_DEVICE); 2804ef4d96ecSMark Brown mutex_unlock(&master->io_mutex); 2805556351f1SVignesh R mutex_unlock(&master->bus_lock_mutex); 2806f4502dd1SVignesh R 2807556351f1SVignesh R if (master->auto_runtime_pm) 2808556351f1SVignesh R pm_runtime_put(master->dev.parent); 2809556351f1SVignesh R 2810556351f1SVignesh R return ret; 2811556351f1SVignesh R } 2812556351f1SVignesh R EXPORT_SYMBOL_GPL(spi_flash_read); 2813556351f1SVignesh R 28147d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 28157d077197SDavid Brownell 28167d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on 28177d077197SDavid Brownell * top of the core. Some other utility methods are defined as 28187d077197SDavid Brownell * inline functions. 28197d077197SDavid Brownell */ 28207d077197SDavid Brownell 28215d870c8eSAndrew Morton static void spi_complete(void *arg) 28225d870c8eSAndrew Morton { 28235d870c8eSAndrew Morton complete(arg); 28245d870c8eSAndrew Morton } 28255d870c8eSAndrew Morton 2826ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2827cf32b71eSErnst Schwab { 2828cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 2829cf32b71eSErnst Schwab int status; 2830cf32b71eSErnst Schwab struct spi_master *master = spi->master; 28310461a414SMark Brown unsigned long flags; 28320461a414SMark Brown 28330461a414SMark Brown status = __spi_validate(spi, message); 28340461a414SMark Brown if (status != 0) 28350461a414SMark Brown return status; 2836cf32b71eSErnst Schwab 2837cf32b71eSErnst Schwab message->complete = spi_complete; 2838cf32b71eSErnst Schwab message->context = &done; 28390461a414SMark Brown message->spi = spi; 2840cf32b71eSErnst Schwab 2841eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2842eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2843eca2ebc7SMartin Sperl 28440461a414SMark Brown /* If we're not using the legacy transfer method then we will 28450461a414SMark Brown * try to transfer in the calling context so special case. 28460461a414SMark Brown * This code would be less tricky if we could remove the 28470461a414SMark Brown * support for driver implemented message queues. 
28480461a414SMark Brown */ 28490461a414SMark Brown if (master->transfer == spi_queued_transfer) { 28500461a414SMark Brown spin_lock_irqsave(&master->bus_lock_spinlock, flags); 28510461a414SMark Brown 28520461a414SMark Brown trace_spi_message_submit(message); 28530461a414SMark Brown 28540461a414SMark Brown status = __spi_queued_transfer(spi, message, false); 28550461a414SMark Brown 28560461a414SMark Brown spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 28570461a414SMark Brown } else { 2858cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 28590461a414SMark Brown } 2860cf32b71eSErnst Schwab 2861cf32b71eSErnst Schwab if (status == 0) { 28620461a414SMark Brown /* Push out the messages in the calling context if we 28630461a414SMark Brown * can. 28640461a414SMark Brown */ 2865eca2ebc7SMartin Sperl if (master->transfer == spi_queued_transfer) { 2866eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2867eca2ebc7SMartin Sperl spi_sync_immediate); 2868eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2869eca2ebc7SMartin Sperl spi_sync_immediate); 2870ef4d96ecSMark Brown __spi_pump_messages(master, false); 2871eca2ebc7SMartin Sperl } 28720461a414SMark Brown 2873cf32b71eSErnst Schwab wait_for_completion(&done); 2874cf32b71eSErnst Schwab status = message->status; 2875cf32b71eSErnst Schwab } 2876cf32b71eSErnst Schwab message->context = NULL; 2877cf32b71eSErnst Schwab return status; 2878cf32b71eSErnst Schwab } 2879cf32b71eSErnst Schwab 28808ae12a0dSDavid Brownell /** 28818ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 28828ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 28838ae12a0dSDavid Brownell * @message: describes the data transfers 288433e34dc6SDavid Brownell * Context: can sleep 28858ae12a0dSDavid Brownell * 28868ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 28878ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. Low-overhead controller 28888ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 28898ae12a0dSDavid Brownell * 28908ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 28918ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 28928ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 28938ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 28948ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 28958ae12a0dSDavid Brownell * 28960c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 28970c868461SDavid Brownell * message will not be freed before this call returns. 28980c868461SDavid Brownell * 289997d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 
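 *
 * A minimal usage sketch (illustrative only; cmd is a hypothetical DMA-safe
 * array owned by the caller):
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = cmd,
 *		.len = sizeof(cmd),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	status = spi_sync(spi, &msg);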
29008ae12a0dSDavid Brownell */ 29018ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 29028ae12a0dSDavid Brownell { 2903ef4d96ecSMark Brown int ret; 2904ef4d96ecSMark Brown 2905ef4d96ecSMark Brown mutex_lock(&spi->master->bus_lock_mutex); 2906ef4d96ecSMark Brown ret = __spi_sync(spi, message); 2907ef4d96ecSMark Brown mutex_unlock(&spi->master->bus_lock_mutex); 2908ef4d96ecSMark Brown 2909ef4d96ecSMark Brown return ret; 29108ae12a0dSDavid Brownell } 29118ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 29128ae12a0dSDavid Brownell 2913cf32b71eSErnst Schwab /** 2914cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 2915cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2916cf32b71eSErnst Schwab * @message: describes the data transfers 2917cf32b71eSErnst Schwab * Context: can sleep 2918cf32b71eSErnst Schwab * 2919cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2920cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 2921cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 2922cf32b71eSErnst Schwab * 2923cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 292425985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2925cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 2926cf32b71eSErnst Schwab * 292797d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 2928cf32b71eSErnst Schwab */ 2929cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2930cf32b71eSErnst Schwab { 2931ef4d96ecSMark Brown return __spi_sync(spi, message); 2932cf32b71eSErnst Schwab } 2933cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 2934cf32b71eSErnst Schwab 2935cf32b71eSErnst Schwab /** 2936cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2937cf32b71eSErnst Schwab * @master: SPI bus master that should be locked for exclusive bus access 2938cf32b71eSErnst Schwab * Context: can sleep 2939cf32b71eSErnst Schwab * 2940cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2941cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2942cf32b71eSErnst Schwab * 2943cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 2944cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2945cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 2946cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 2947cf32b71eSErnst Schwab * 294897d56dc6SJavier Martinez Canillas * Return: always zero. 
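 *
 * A typical sequence (illustrative sketch; msg1 and msg2 stand for
 * hypothetical messages that must reach the device back to back):
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);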
2949cf32b71eSErnst Schwab */ 2950cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master) 2951cf32b71eSErnst Schwab { 2952cf32b71eSErnst Schwab unsigned long flags; 2953cf32b71eSErnst Schwab 2954cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 2955cf32b71eSErnst Schwab 2956cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2957cf32b71eSErnst Schwab master->bus_lock_flag = 1; 2958cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2959cf32b71eSErnst Schwab 2960cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 2961cf32b71eSErnst Schwab 2962cf32b71eSErnst Schwab return 0; 2963cf32b71eSErnst Schwab } 2964cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 2965cf32b71eSErnst Schwab 2966cf32b71eSErnst Schwab /** 2967cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 2968cf32b71eSErnst Schwab * @master: SPI bus master that was locked for exclusive bus access 2969cf32b71eSErnst Schwab * Context: can sleep 2970cf32b71eSErnst Schwab * 2971cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2972cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2973cf32b71eSErnst Schwab * 2974cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2975cf32b71eSErnst Schwab * call. 2976cf32b71eSErnst Schwab * 297797d56dc6SJavier Martinez Canillas * Return: always zero. 2978cf32b71eSErnst Schwab */ 2979cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 2980cf32b71eSErnst Schwab { 2981cf32b71eSErnst Schwab master->bus_lock_flag = 0; 2982cf32b71eSErnst Schwab 2983cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 2984cf32b71eSErnst Schwab 2985cf32b71eSErnst Schwab return 0; 2986cf32b71eSErnst Schwab } 2987cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 2988cf32b71eSErnst Schwab 2989a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 2990a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 29918ae12a0dSDavid Brownell 29928ae12a0dSDavid Brownell static u8 *buf; 29938ae12a0dSDavid Brownell 29948ae12a0dSDavid Brownell /** 29958ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 29968ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 29978ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 29988ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 299927570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 300027570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 300133e34dc6SDavid Brownell * Context: can sleep 30028ae12a0dSDavid Brownell * 30038ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 30048ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 30058ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 3006b885244eSDavid Brownell * This call may only be used from a context that may sleep. 30078ae12a0dSDavid Brownell * 30080c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 300933e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 301033e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 30110c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 
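 *
 * A typical use is reading back a small ID or status word (illustrative
 * sketch only; the 0x9f opcode is just an example):
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *
 *	status = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));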
301297d56dc6SJavier Martinez Canillas * 301397d56dc6SJavier Martinez Canillas * Return: zero on success, else a negative error code. 30148ae12a0dSDavid Brownell */ 30158ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 30160c4a1590SMark Brown const void *txbuf, unsigned n_tx, 30170c4a1590SMark Brown void *rxbuf, unsigned n_rx) 30188ae12a0dSDavid Brownell { 3019068f4070SDavid Brownell static DEFINE_MUTEX(lock); 30208ae12a0dSDavid Brownell 30218ae12a0dSDavid Brownell int status; 30228ae12a0dSDavid Brownell struct spi_message message; 3023bdff549eSDavid Brownell struct spi_transfer x[2]; 30248ae12a0dSDavid Brownell u8 *local_buf; 30258ae12a0dSDavid Brownell 3026b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 3027b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 3028b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 3029b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 30308ae12a0dSDavid Brownell */ 3031b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 30322cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 30332cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 3034b3a223eeSMark Brown if (!local_buf) 3035b3a223eeSMark Brown return -ENOMEM; 3036b3a223eeSMark Brown } else { 3037b3a223eeSMark Brown local_buf = buf; 3038b3a223eeSMark Brown } 30398ae12a0dSDavid Brownell 30408275c642SVitaly Wool spi_message_init(&message); 30415fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 3042bdff549eSDavid Brownell if (n_tx) { 3043bdff549eSDavid Brownell x[0].len = n_tx; 3044bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 3045bdff549eSDavid Brownell } 3046bdff549eSDavid Brownell if (n_rx) { 3047bdff549eSDavid Brownell x[1].len = n_rx; 3048bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 3049bdff549eSDavid Brownell } 30508275c642SVitaly Wool 30518ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 3052bdff549eSDavid Brownell x[0].tx_buf = local_buf; 3053bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 30548ae12a0dSDavid Brownell 30558ae12a0dSDavid Brownell /* do the i/o */ 30568ae12a0dSDavid Brownell status = spi_sync(spi, &message); 30579b938b74SMarc Pignat if (status == 0) 3058bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 30598ae12a0dSDavid Brownell 3060bdff549eSDavid Brownell if (x[0].tx_buf == buf) 3061068f4070SDavid Brownell mutex_unlock(&lock); 30628ae12a0dSDavid Brownell else 30638ae12a0dSDavid Brownell kfree(local_buf); 30648ae12a0dSDavid Brownell 30658ae12a0dSDavid Brownell return status; 30668ae12a0dSDavid Brownell } 30678ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 30688ae12a0dSDavid Brownell 30698ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 30708ae12a0dSDavid Brownell 3071ce79d54aSPantelis Antoniou #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3072ce79d54aSPantelis Antoniou static int __spi_of_device_match(struct device *dev, void *data) 3073ce79d54aSPantelis Antoniou { 3074ce79d54aSPantelis Antoniou return dev->of_node == data; 3075ce79d54aSPantelis Antoniou } 3076ce79d54aSPantelis Antoniou 3077ce79d54aSPantelis Antoniou /* must call put_device() when done with returned spi_device device */ 3078ce79d54aSPantelis Antoniou static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3079ce79d54aSPantelis Antoniou { 3080ce79d54aSPantelis Antoniou struct device *dev = 
bus_find_device(&spi_bus_type, NULL, node, 3081ce79d54aSPantelis Antoniou __spi_of_device_match); 3082ce79d54aSPantelis Antoniou return dev ? to_spi_device(dev) : NULL; 3083ce79d54aSPantelis Antoniou } 3084ce79d54aSPantelis Antoniou 3085ce79d54aSPantelis Antoniou static int __spi_of_master_match(struct device *dev, const void *data) 3086ce79d54aSPantelis Antoniou { 3087ce79d54aSPantelis Antoniou return dev->of_node == data; 3088ce79d54aSPantelis Antoniou } 3089ce79d54aSPantelis Antoniou 3090ce79d54aSPantelis Antoniou /* the spi masters are not using spi_bus, so we find it with another way */ 3091ce79d54aSPantelis Antoniou static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3092ce79d54aSPantelis Antoniou { 3093ce79d54aSPantelis Antoniou struct device *dev; 3094ce79d54aSPantelis Antoniou 3095ce79d54aSPantelis Antoniou dev = class_find_device(&spi_master_class, NULL, node, 3096ce79d54aSPantelis Antoniou __spi_of_master_match); 3097ce79d54aSPantelis Antoniou if (!dev) 3098ce79d54aSPantelis Antoniou return NULL; 3099ce79d54aSPantelis Antoniou 3100ce79d54aSPantelis Antoniou /* reference got in class_find_device */ 3101ce79d54aSPantelis Antoniou return container_of(dev, struct spi_master, dev); 3102ce79d54aSPantelis Antoniou } 3103ce79d54aSPantelis Antoniou 3104ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3105ce79d54aSPantelis Antoniou void *arg) 3106ce79d54aSPantelis Antoniou { 3107ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 3108ce79d54aSPantelis Antoniou struct spi_master *master; 3109ce79d54aSPantelis Antoniou struct spi_device *spi; 3110ce79d54aSPantelis Antoniou 3111ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 3112ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 3113ce79d54aSPantelis Antoniou master = of_find_spi_master_by_node(rd->dn->parent); 3114ce79d54aSPantelis Antoniou if (master == NULL) 3115ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 3116ce79d54aSPantelis Antoniou 3117bd6c1644SGeert Uytterhoeven if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3118bd6c1644SGeert Uytterhoeven put_device(&master->dev); 3119bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3120bd6c1644SGeert Uytterhoeven } 3121bd6c1644SGeert Uytterhoeven 3122ce79d54aSPantelis Antoniou spi = of_register_spi_device(master, rd->dn); 3123ce79d54aSPantelis Antoniou put_device(&master->dev); 3124ce79d54aSPantelis Antoniou 3125ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 3126ce79d54aSPantelis Antoniou pr_err("%s: failed to create for '%s'\n", 3127ce79d54aSPantelis Antoniou __func__, rd->dn->full_name); 3128ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 3129ce79d54aSPantelis Antoniou } 3130ce79d54aSPantelis Antoniou break; 3131ce79d54aSPantelis Antoniou 3132ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 3133bd6c1644SGeert Uytterhoeven /* already depopulated? */ 3134bd6c1644SGeert Uytterhoeven if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3135bd6c1644SGeert Uytterhoeven return NOTIFY_OK; 3136bd6c1644SGeert Uytterhoeven 3137ce79d54aSPantelis Antoniou /* find our device by node */ 3138ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 3139ce79d54aSPantelis Antoniou if (spi == NULL) 3140ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? 
not meant for us */ 3141ce79d54aSPantelis Antoniou 3142ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 3143ce79d54aSPantelis Antoniou spi_unregister_device(spi); 3144ce79d54aSPantelis Antoniou 3145ce79d54aSPantelis Antoniou /* and put the reference of the find */ 3146ce79d54aSPantelis Antoniou put_device(&spi->dev); 3147ce79d54aSPantelis Antoniou break; 3148ce79d54aSPantelis Antoniou } 3149ce79d54aSPantelis Antoniou 3150ce79d54aSPantelis Antoniou return NOTIFY_OK; 3151ce79d54aSPantelis Antoniou } 3152ce79d54aSPantelis Antoniou 3153ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 3154ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 3155ce79d54aSPantelis Antoniou }; 3156ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3157ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 3158ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3159ce79d54aSPantelis Antoniou 31607f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI) 31617f24467fSOctavian Purdila static int spi_acpi_master_match(struct device *dev, const void *data) 31627f24467fSOctavian Purdila { 31637f24467fSOctavian Purdila return ACPI_COMPANION(dev->parent) == data; 31647f24467fSOctavian Purdila } 31657f24467fSOctavian Purdila 31667f24467fSOctavian Purdila static int spi_acpi_device_match(struct device *dev, void *data) 31677f24467fSOctavian Purdila { 31687f24467fSOctavian Purdila return ACPI_COMPANION(dev) == data; 31697f24467fSOctavian Purdila } 31707f24467fSOctavian Purdila 31717f24467fSOctavian Purdila static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 31727f24467fSOctavian Purdila { 31737f24467fSOctavian Purdila struct device *dev; 31747f24467fSOctavian Purdila 31757f24467fSOctavian Purdila dev = class_find_device(&spi_master_class, NULL, adev, 31767f24467fSOctavian Purdila spi_acpi_master_match); 31777f24467fSOctavian Purdila if (!dev) 31787f24467fSOctavian Purdila return NULL; 31797f24467fSOctavian Purdila 31807f24467fSOctavian Purdila return container_of(dev, struct spi_master, dev); 31817f24467fSOctavian Purdila } 31827f24467fSOctavian Purdila 31837f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 31847f24467fSOctavian Purdila { 31857f24467fSOctavian Purdila struct device *dev; 31867f24467fSOctavian Purdila 31877f24467fSOctavian Purdila dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 31887f24467fSOctavian Purdila 31897f24467fSOctavian Purdila return dev ? 
to_spi_device(dev) : NULL; 31907f24467fSOctavian Purdila } 31917f24467fSOctavian Purdila 31927f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 31937f24467fSOctavian Purdila void *arg) 31947f24467fSOctavian Purdila { 31957f24467fSOctavian Purdila struct acpi_device *adev = arg; 31967f24467fSOctavian Purdila struct spi_master *master; 31977f24467fSOctavian Purdila struct spi_device *spi; 31987f24467fSOctavian Purdila 31997f24467fSOctavian Purdila switch (value) { 32007f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_ADD: 32017f24467fSOctavian Purdila master = acpi_spi_find_master_by_adev(adev->parent); 32027f24467fSOctavian Purdila if (!master) 32037f24467fSOctavian Purdila break; 32047f24467fSOctavian Purdila 32057f24467fSOctavian Purdila acpi_register_spi_device(master, adev); 32067f24467fSOctavian Purdila put_device(&master->dev); 32077f24467fSOctavian Purdila break; 32087f24467fSOctavian Purdila case ACPI_RECONFIG_DEVICE_REMOVE: 32097f24467fSOctavian Purdila if (!acpi_device_enumerated(adev)) 32107f24467fSOctavian Purdila break; 32117f24467fSOctavian Purdila 32127f24467fSOctavian Purdila spi = acpi_spi_find_device_by_adev(adev); 32137f24467fSOctavian Purdila if (!spi) 32147f24467fSOctavian Purdila break; 32157f24467fSOctavian Purdila 32167f24467fSOctavian Purdila spi_unregister_device(spi); 32177f24467fSOctavian Purdila put_device(&spi->dev); 32187f24467fSOctavian Purdila break; 32197f24467fSOctavian Purdila } 32207f24467fSOctavian Purdila 32217f24467fSOctavian Purdila return NOTIFY_OK; 32227f24467fSOctavian Purdila } 32237f24467fSOctavian Purdila 32247f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = { 32257f24467fSOctavian Purdila .notifier_call = acpi_spi_notify, 32267f24467fSOctavian Purdila }; 32277f24467fSOctavian Purdila #else 32287f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier; 32297f24467fSOctavian Purdila #endif 32307f24467fSOctavian Purdila 32318ae12a0dSDavid Brownell static int __init spi_init(void) 32328ae12a0dSDavid Brownell { 3233b885244eSDavid Brownell int status; 32348ae12a0dSDavid Brownell 3235e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3236b885244eSDavid Brownell if (!buf) { 3237b885244eSDavid Brownell status = -ENOMEM; 3238b885244eSDavid Brownell goto err0; 32398ae12a0dSDavid Brownell } 3240b885244eSDavid Brownell 3241b885244eSDavid Brownell status = bus_register(&spi_bus_type); 3242b885244eSDavid Brownell if (status < 0) 3243b885244eSDavid Brownell goto err1; 3244b885244eSDavid Brownell 3245b885244eSDavid Brownell status = class_register(&spi_master_class); 3246b885244eSDavid Brownell if (status < 0) 3247b885244eSDavid Brownell goto err2; 3248ce79d54aSPantelis Antoniou 32495267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3250ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 32517f24467fSOctavian Purdila if (IS_ENABLED(CONFIG_ACPI)) 32527f24467fSOctavian Purdila WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3253ce79d54aSPantelis Antoniou 3254b885244eSDavid Brownell return 0; 3255b885244eSDavid Brownell 3256b885244eSDavid Brownell err2: 3257b885244eSDavid Brownell bus_unregister(&spi_bus_type); 3258b885244eSDavid Brownell err1: 3259b885244eSDavid Brownell kfree(buf); 3260b885244eSDavid Brownell buf = NULL; 3261b885244eSDavid Brownell err0: 3262b885244eSDavid Brownell return status; 3263b885244eSDavid Brownell } 3264b885244eSDavid Brownell 32658ae12a0dSDavid Brownell /* board_info is 
normally registered in arch_initcall(),
32668ae12a0dSDavid Brownell * but even essential drivers wait till later
3267b885244eSDavid Brownell *
3268b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and
3269b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs
3270b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public.
32718ae12a0dSDavid Brownell */
3272673c0c00SDavid Brownell postcore_initcall(spi_init);
32738ae12a0dSDavid Brownell 
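/* Illustrative sketch only (not part of this file): a board file would
 * typically describe its SPI devices in a static spi_board_info table and
 * register it from an arch_initcall(), for example:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-dev",
 *			.max_speed_hz	= 10000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */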