18ae12a0dSDavid Brownell /* 2ca632f55SGrant Likely * SPI init/core code 38ae12a0dSDavid Brownell * 48ae12a0dSDavid Brownell * Copyright (C) 2005 David Brownell 5d57a4282SGrant Likely * Copyright (C) 2008 Secret Lab Technologies Ltd. 68ae12a0dSDavid Brownell * 78ae12a0dSDavid Brownell * This program is free software; you can redistribute it and/or modify 88ae12a0dSDavid Brownell * it under the terms of the GNU General Public License as published by 98ae12a0dSDavid Brownell * the Free Software Foundation; either version 2 of the License, or 108ae12a0dSDavid Brownell * (at your option) any later version. 118ae12a0dSDavid Brownell * 128ae12a0dSDavid Brownell * This program is distributed in the hope that it will be useful, 138ae12a0dSDavid Brownell * but WITHOUT ANY WARRANTY; without even the implied warranty of 148ae12a0dSDavid Brownell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 158ae12a0dSDavid Brownell * GNU General Public License for more details. 168ae12a0dSDavid Brownell */ 178ae12a0dSDavid Brownell 188ae12a0dSDavid Brownell #include <linux/kernel.h> 198ae12a0dSDavid Brownell #include <linux/device.h> 208ae12a0dSDavid Brownell #include <linux/init.h> 218ae12a0dSDavid Brownell #include <linux/cache.h> 2299adef31SMark Brown #include <linux/dma-mapping.h> 2399adef31SMark Brown #include <linux/dmaengine.h> 2494040828SMatthias Kaehlcke #include <linux/mutex.h> 252b7a32f7SSinan Akman #include <linux/of_device.h> 26d57a4282SGrant Likely #include <linux/of_irq.h> 2786be408bSSylwester Nawrocki #include <linux/clk/clk-conf.h> 285a0e3ad6STejun Heo #include <linux/slab.h> 29e0626e38SAnton Vorontsov #include <linux/mod_devicetable.h> 308ae12a0dSDavid Brownell #include <linux/spi/spi.h> 3174317984SJean-Christophe PLAGNIOL-VILLARD #include <linux/of_gpio.h> 323ae22e8cSMark Brown #include <linux/pm_runtime.h> 33f48c767cSUlf Hansson #include <linux/pm_domain.h> 34025ed130SPaul Gortmaker #include <linux/export.h> 358bd75c77SClark Williams #include 
<linux/sched/rt.h> 36ffbbdd21SLinus Walleij #include <linux/delay.h> 37ffbbdd21SLinus Walleij #include <linux/kthread.h> 3864bee4d2SMika Westerberg #include <linux/ioport.h> 3964bee4d2SMika Westerberg #include <linux/acpi.h> 408ae12a0dSDavid Brownell 4156ec1978SMark Brown #define CREATE_TRACE_POINTS 4256ec1978SMark Brown #include <trace/events/spi.h> 4356ec1978SMark Brown 448ae12a0dSDavid Brownell static void spidev_release(struct device *dev) 458ae12a0dSDavid Brownell { 460ffa0285SHans-Peter Nilsson struct spi_device *spi = to_spi_device(dev); 478ae12a0dSDavid Brownell 488ae12a0dSDavid Brownell /* spi masters may cleanup for released devices */ 498ae12a0dSDavid Brownell if (spi->master->cleanup) 508ae12a0dSDavid Brownell spi->master->cleanup(spi); 518ae12a0dSDavid Brownell 520c868461SDavid Brownell spi_master_put(spi->master); 5307a389feSRoman Tereshonkov kfree(spi); 548ae12a0dSDavid Brownell } 558ae12a0dSDavid Brownell 568ae12a0dSDavid Brownell static ssize_t 578ae12a0dSDavid Brownell modalias_show(struct device *dev, struct device_attribute *a, char *buf) 588ae12a0dSDavid Brownell { 598ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 608c4ff6d0SZhang Rui int len; 618c4ff6d0SZhang Rui 628c4ff6d0SZhang Rui len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 638c4ff6d0SZhang Rui if (len != -ENODEV) 648c4ff6d0SZhang Rui return len; 658ae12a0dSDavid Brownell 66d8e328b3SGrant Likely return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 678ae12a0dSDavid Brownell } 68aa7da564SGreg Kroah-Hartman static DEVICE_ATTR_RO(modalias); 698ae12a0dSDavid Brownell 70eca2ebc7SMartin Sperl #define SPI_STATISTICS_ATTRS(field, file) \ 71eca2ebc7SMartin Sperl static ssize_t spi_master_##field##_show(struct device *dev, \ 72eca2ebc7SMartin Sperl struct device_attribute *attr, \ 73eca2ebc7SMartin Sperl char *buf) \ 74eca2ebc7SMartin Sperl { \ 75eca2ebc7SMartin Sperl struct spi_master *master = container_of(dev, \ 76eca2ebc7SMartin Sperl struct 
spi_master, dev); \ 77eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&master->statistics, buf); \ 78eca2ebc7SMartin Sperl } \ 79eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_master_##field = { \ 80eca2ebc7SMartin Sperl .attr = { .name = file, .mode = S_IRUGO }, \ 81eca2ebc7SMartin Sperl .show = spi_master_##field##_show, \ 82eca2ebc7SMartin Sperl }; \ 83eca2ebc7SMartin Sperl static ssize_t spi_device_##field##_show(struct device *dev, \ 84eca2ebc7SMartin Sperl struct device_attribute *attr, \ 85eca2ebc7SMartin Sperl char *buf) \ 86eca2ebc7SMartin Sperl { \ 87eca2ebc7SMartin Sperl struct spi_device *spi = container_of(dev, \ 88eca2ebc7SMartin Sperl struct spi_device, dev); \ 89eca2ebc7SMartin Sperl return spi_statistics_##field##_show(&spi->statistics, buf); \ 90eca2ebc7SMartin Sperl } \ 91eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_device_##field = { \ 92eca2ebc7SMartin Sperl .attr = { .name = file, .mode = S_IRUGO }, \ 93eca2ebc7SMartin Sperl .show = spi_device_##field##_show, \ 94eca2ebc7SMartin Sperl } 95eca2ebc7SMartin Sperl 96eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 97eca2ebc7SMartin Sperl static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 98eca2ebc7SMartin Sperl char *buf) \ 99eca2ebc7SMartin Sperl { \ 100eca2ebc7SMartin Sperl unsigned long flags; \ 101eca2ebc7SMartin Sperl ssize_t len; \ 102eca2ebc7SMartin Sperl spin_lock_irqsave(&stat->lock, flags); \ 103eca2ebc7SMartin Sperl len = sprintf(buf, format_string, stat->field); \ 104eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stat->lock, flags); \ 105eca2ebc7SMartin Sperl return len; \ 106eca2ebc7SMartin Sperl } \ 107eca2ebc7SMartin Sperl SPI_STATISTICS_ATTRS(name, file) 108eca2ebc7SMartin Sperl 109eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW(field, format_string) \ 110eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 111eca2ebc7SMartin Sperl field, 
format_string) 112eca2ebc7SMartin Sperl 113eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(messages, "%lu"); 114eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(transfers, "%lu"); 115eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(errors, "%lu"); 116eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(timedout, "%lu"); 117eca2ebc7SMartin Sperl 118eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync, "%lu"); 119eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 120eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_async, "%lu"); 121eca2ebc7SMartin Sperl 122eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes, "%llu"); 123eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 124eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 125eca2ebc7SMartin Sperl 126aa7da564SGreg Kroah-Hartman static struct attribute *spi_dev_attrs[] = { 127aa7da564SGreg Kroah-Hartman &dev_attr_modalias.attr, 128aa7da564SGreg Kroah-Hartman NULL, 1298ae12a0dSDavid Brownell }; 130eca2ebc7SMartin Sperl 131eca2ebc7SMartin Sperl static const struct attribute_group spi_dev_group = { 132eca2ebc7SMartin Sperl .attrs = spi_dev_attrs, 133eca2ebc7SMartin Sperl }; 134eca2ebc7SMartin Sperl 135eca2ebc7SMartin Sperl static struct attribute *spi_device_statistics_attrs[] = { 136eca2ebc7SMartin Sperl &dev_attr_spi_device_messages.attr, 137eca2ebc7SMartin Sperl &dev_attr_spi_device_transfers.attr, 138eca2ebc7SMartin Sperl &dev_attr_spi_device_errors.attr, 139eca2ebc7SMartin Sperl &dev_attr_spi_device_timedout.attr, 140eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync.attr, 141eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_sync_immediate.attr, 142eca2ebc7SMartin Sperl &dev_attr_spi_device_spi_async.attr, 143eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes.attr, 144eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_rx.attr, 145eca2ebc7SMartin Sperl &dev_attr_spi_device_bytes_tx.attr, 146eca2ebc7SMartin Sperl NULL, 147eca2ebc7SMartin Sperl }; 148eca2ebc7SMartin Sperl 149eca2ebc7SMartin Sperl static const struct 
attribute_group spi_device_statistics_group = { 150eca2ebc7SMartin Sperl .name = "statistics", 151eca2ebc7SMartin Sperl .attrs = spi_device_statistics_attrs, 152eca2ebc7SMartin Sperl }; 153eca2ebc7SMartin Sperl 154eca2ebc7SMartin Sperl static const struct attribute_group *spi_dev_groups[] = { 155eca2ebc7SMartin Sperl &spi_dev_group, 156eca2ebc7SMartin Sperl &spi_device_statistics_group, 157eca2ebc7SMartin Sperl NULL, 158eca2ebc7SMartin Sperl }; 159eca2ebc7SMartin Sperl 160eca2ebc7SMartin Sperl static struct attribute *spi_master_statistics_attrs[] = { 161eca2ebc7SMartin Sperl &dev_attr_spi_master_messages.attr, 162eca2ebc7SMartin Sperl &dev_attr_spi_master_transfers.attr, 163eca2ebc7SMartin Sperl &dev_attr_spi_master_errors.attr, 164eca2ebc7SMartin Sperl &dev_attr_spi_master_timedout.attr, 165eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_sync.attr, 166eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_sync_immediate.attr, 167eca2ebc7SMartin Sperl &dev_attr_spi_master_spi_async.attr, 168eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes.attr, 169eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes_rx.attr, 170eca2ebc7SMartin Sperl &dev_attr_spi_master_bytes_tx.attr, 171eca2ebc7SMartin Sperl NULL, 172eca2ebc7SMartin Sperl }; 173eca2ebc7SMartin Sperl 174eca2ebc7SMartin Sperl static const struct attribute_group spi_master_statistics_group = { 175eca2ebc7SMartin Sperl .name = "statistics", 176eca2ebc7SMartin Sperl .attrs = spi_master_statistics_attrs, 177eca2ebc7SMartin Sperl }; 178eca2ebc7SMartin Sperl 179eca2ebc7SMartin Sperl static const struct attribute_group *spi_master_groups[] = { 180eca2ebc7SMartin Sperl &spi_master_statistics_group, 181eca2ebc7SMartin Sperl NULL, 182eca2ebc7SMartin Sperl }; 183eca2ebc7SMartin Sperl 184eca2ebc7SMartin Sperl void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 185eca2ebc7SMartin Sperl struct spi_transfer *xfer, 186eca2ebc7SMartin Sperl struct spi_master *master) 187eca2ebc7SMartin Sperl { 188eca2ebc7SMartin Sperl unsigned 
long flags; 189eca2ebc7SMartin Sperl 190eca2ebc7SMartin Sperl spin_lock_irqsave(&stats->lock, flags); 191eca2ebc7SMartin Sperl 192eca2ebc7SMartin Sperl stats->transfers++; 193eca2ebc7SMartin Sperl 194eca2ebc7SMartin Sperl stats->bytes += xfer->len; 195eca2ebc7SMartin Sperl if ((xfer->tx_buf) && 196eca2ebc7SMartin Sperl (xfer->tx_buf != master->dummy_tx)) 197eca2ebc7SMartin Sperl stats->bytes_tx += xfer->len; 198eca2ebc7SMartin Sperl if ((xfer->rx_buf) && 199eca2ebc7SMartin Sperl (xfer->rx_buf != master->dummy_rx)) 200eca2ebc7SMartin Sperl stats->bytes_rx += xfer->len; 201eca2ebc7SMartin Sperl 202eca2ebc7SMartin Sperl spin_unlock_irqrestore(&stats->lock, flags); 203eca2ebc7SMartin Sperl } 204eca2ebc7SMartin Sperl EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 2058ae12a0dSDavid Brownell 2068ae12a0dSDavid Brownell /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 2078ae12a0dSDavid Brownell * and the sysfs version makes coldplug work too. 2088ae12a0dSDavid Brownell */ 2098ae12a0dSDavid Brownell 21075368bf6SAnton Vorontsov static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 21175368bf6SAnton Vorontsov const struct spi_device *sdev) 21275368bf6SAnton Vorontsov { 21375368bf6SAnton Vorontsov while (id->name[0]) { 21475368bf6SAnton Vorontsov if (!strcmp(sdev->modalias, id->name)) 21575368bf6SAnton Vorontsov return id; 21675368bf6SAnton Vorontsov id++; 21775368bf6SAnton Vorontsov } 21875368bf6SAnton Vorontsov return NULL; 21975368bf6SAnton Vorontsov } 22075368bf6SAnton Vorontsov 22175368bf6SAnton Vorontsov const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 22275368bf6SAnton Vorontsov { 22375368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 22475368bf6SAnton Vorontsov 22575368bf6SAnton Vorontsov return spi_match_id(sdrv->id_table, sdev); 22675368bf6SAnton Vorontsov } 22775368bf6SAnton Vorontsov EXPORT_SYMBOL_GPL(spi_get_device_id); 22875368bf6SAnton 
Vorontsov 2298ae12a0dSDavid Brownell static int spi_match_device(struct device *dev, struct device_driver *drv) 2308ae12a0dSDavid Brownell { 2318ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 23275368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(drv); 23375368bf6SAnton Vorontsov 2342b7a32f7SSinan Akman /* Attempt an OF style match */ 2352b7a32f7SSinan Akman if (of_driver_match_device(dev, drv)) 2362b7a32f7SSinan Akman return 1; 2372b7a32f7SSinan Akman 23864bee4d2SMika Westerberg /* Then try ACPI */ 23964bee4d2SMika Westerberg if (acpi_driver_match_device(dev, drv)) 24064bee4d2SMika Westerberg return 1; 24164bee4d2SMika Westerberg 24275368bf6SAnton Vorontsov if (sdrv->id_table) 24375368bf6SAnton Vorontsov return !!spi_match_id(sdrv->id_table, spi); 2448ae12a0dSDavid Brownell 24535f74fcaSKay Sievers return strcmp(spi->modalias, drv->name) == 0; 2468ae12a0dSDavid Brownell } 2478ae12a0dSDavid Brownell 2487eff2e7aSKay Sievers static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 2498ae12a0dSDavid Brownell { 2508ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 2518c4ff6d0SZhang Rui int rc; 2528c4ff6d0SZhang Rui 2538c4ff6d0SZhang Rui rc = acpi_device_uevent_modalias(dev, env); 2548c4ff6d0SZhang Rui if (rc != -ENODEV) 2558c4ff6d0SZhang Rui return rc; 2568ae12a0dSDavid Brownell 257e0626e38SAnton Vorontsov add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 2588ae12a0dSDavid Brownell return 0; 2598ae12a0dSDavid Brownell } 2608ae12a0dSDavid Brownell 2618ae12a0dSDavid Brownell struct bus_type spi_bus_type = { 2628ae12a0dSDavid Brownell .name = "spi", 263aa7da564SGreg Kroah-Hartman .dev_groups = spi_dev_groups, 2648ae12a0dSDavid Brownell .match = spi_match_device, 2658ae12a0dSDavid Brownell .uevent = spi_uevent, 2668ae12a0dSDavid Brownell }; 2678ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_bus_type); 2688ae12a0dSDavid Brownell 269b885244eSDavid Brownell 270b885244eSDavid 
Brownell static int spi_drv_probe(struct device *dev) 271b885244eSDavid Brownell { 272b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 27344af7927SJon Hunter struct spi_device *spi = to_spi_device(dev); 27433cf00e5SMika Westerberg int ret; 275b885244eSDavid Brownell 27686be408bSSylwester Nawrocki ret = of_clk_set_defaults(dev->of_node, false); 27786be408bSSylwester Nawrocki if (ret) 27886be408bSSylwester Nawrocki return ret; 27986be408bSSylwester Nawrocki 28044af7927SJon Hunter if (dev->of_node) { 28144af7927SJon Hunter spi->irq = of_irq_get(dev->of_node, 0); 28244af7927SJon Hunter if (spi->irq == -EPROBE_DEFER) 28344af7927SJon Hunter return -EPROBE_DEFER; 28444af7927SJon Hunter if (spi->irq < 0) 28544af7927SJon Hunter spi->irq = 0; 28644af7927SJon Hunter } 28744af7927SJon Hunter 288676e7c25SUlf Hansson ret = dev_pm_domain_attach(dev, true); 289676e7c25SUlf Hansson if (ret != -EPROBE_DEFER) { 29044af7927SJon Hunter ret = sdrv->probe(spi); 29133cf00e5SMika Westerberg if (ret) 292676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 293676e7c25SUlf Hansson } 29433cf00e5SMika Westerberg 29533cf00e5SMika Westerberg return ret; 296b885244eSDavid Brownell } 297b885244eSDavid Brownell 298b885244eSDavid Brownell static int spi_drv_remove(struct device *dev) 299b885244eSDavid Brownell { 300b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 30133cf00e5SMika Westerberg int ret; 302b885244eSDavid Brownell 303aec35f4eSJean Delvare ret = sdrv->remove(to_spi_device(dev)); 304676e7c25SUlf Hansson dev_pm_domain_detach(dev, true); 30533cf00e5SMika Westerberg 30633cf00e5SMika Westerberg return ret; 307b885244eSDavid Brownell } 308b885244eSDavid Brownell 309b885244eSDavid Brownell static void spi_drv_shutdown(struct device *dev) 310b885244eSDavid Brownell { 311b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 312b885244eSDavid Brownell 313b885244eSDavid Brownell 
sdrv->shutdown(to_spi_device(dev)); 314b885244eSDavid Brownell } 315b885244eSDavid Brownell 31633e34dc6SDavid Brownell /** 31733e34dc6SDavid Brownell * spi_register_driver - register a SPI driver 31833e34dc6SDavid Brownell * @sdrv: the driver to register 31933e34dc6SDavid Brownell * Context: can sleep 32033e34dc6SDavid Brownell */ 321b885244eSDavid Brownell int spi_register_driver(struct spi_driver *sdrv) 322b885244eSDavid Brownell { 323b885244eSDavid Brownell sdrv->driver.bus = &spi_bus_type; 324b885244eSDavid Brownell if (sdrv->probe) 325b885244eSDavid Brownell sdrv->driver.probe = spi_drv_probe; 326b885244eSDavid Brownell if (sdrv->remove) 327b885244eSDavid Brownell sdrv->driver.remove = spi_drv_remove; 328b885244eSDavid Brownell if (sdrv->shutdown) 329b885244eSDavid Brownell sdrv->driver.shutdown = spi_drv_shutdown; 330b885244eSDavid Brownell return driver_register(&sdrv->driver); 331b885244eSDavid Brownell } 332b885244eSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_driver); 333b885244eSDavid Brownell 3348ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 3358ae12a0dSDavid Brownell 3368ae12a0dSDavid Brownell /* SPI devices should normally not be created by SPI device drivers; that 3378ae12a0dSDavid Brownell * would make them board-specific. Similarly with SPI master drivers. 3388ae12a0dSDavid Brownell * Device registration normally goes into like arch/.../mach.../board-YYY.c 3398ae12a0dSDavid Brownell * with other readonly (flashable) information about mainboard devices. 
3408ae12a0dSDavid Brownell */ 3418ae12a0dSDavid Brownell 3428ae12a0dSDavid Brownell struct boardinfo { 3438ae12a0dSDavid Brownell struct list_head list; 3442b9603a0SFeng Tang struct spi_board_info board_info; 3458ae12a0dSDavid Brownell }; 3468ae12a0dSDavid Brownell 3478ae12a0dSDavid Brownell static LIST_HEAD(board_list); 3482b9603a0SFeng Tang static LIST_HEAD(spi_master_list); 3492b9603a0SFeng Tang 3502b9603a0SFeng Tang /* 3512b9603a0SFeng Tang * Used to protect add/del opertion for board_info list and 3522b9603a0SFeng Tang * spi_master list, and their matching process 3532b9603a0SFeng Tang */ 35494040828SMatthias Kaehlcke static DEFINE_MUTEX(board_lock); 3558ae12a0dSDavid Brownell 356dc87c98eSGrant Likely /** 357dc87c98eSGrant Likely * spi_alloc_device - Allocate a new SPI device 358dc87c98eSGrant Likely * @master: Controller to which device is connected 359dc87c98eSGrant Likely * Context: can sleep 360dc87c98eSGrant Likely * 361dc87c98eSGrant Likely * Allows a driver to allocate and initialize a spi_device without 362dc87c98eSGrant Likely * registering it immediately. This allows a driver to directly 363dc87c98eSGrant Likely * fill the spi_device with device parameters before calling 364dc87c98eSGrant Likely * spi_add_device() on it. 365dc87c98eSGrant Likely * 366dc87c98eSGrant Likely * Caller is responsible to call spi_add_device() on the returned 367dc87c98eSGrant Likely * spi_device structure to add it to the SPI master. If the caller 368dc87c98eSGrant Likely * needs to discard the spi_device without adding it, then it should 369dc87c98eSGrant Likely * call spi_dev_put() on it. 370dc87c98eSGrant Likely * 371dc87c98eSGrant Likely * Returns a pointer to the new device, or NULL. 
372dc87c98eSGrant Likely */ 373dc87c98eSGrant Likely struct spi_device *spi_alloc_device(struct spi_master *master) 374dc87c98eSGrant Likely { 375dc87c98eSGrant Likely struct spi_device *spi; 376dc87c98eSGrant Likely 377dc87c98eSGrant Likely if (!spi_master_get(master)) 378dc87c98eSGrant Likely return NULL; 379dc87c98eSGrant Likely 3805fe5f05eSJingoo Han spi = kzalloc(sizeof(*spi), GFP_KERNEL); 381dc87c98eSGrant Likely if (!spi) { 382dc87c98eSGrant Likely spi_master_put(master); 383dc87c98eSGrant Likely return NULL; 384dc87c98eSGrant Likely } 385dc87c98eSGrant Likely 386dc87c98eSGrant Likely spi->master = master; 387178db7d3SLaurent Pinchart spi->dev.parent = &master->dev; 388dc87c98eSGrant Likely spi->dev.bus = &spi_bus_type; 389dc87c98eSGrant Likely spi->dev.release = spidev_release; 390446411e1SAndreas Larsson spi->cs_gpio = -ENOENT; 391eca2ebc7SMartin Sperl 392eca2ebc7SMartin Sperl spin_lock_init(&spi->statistics.lock); 393eca2ebc7SMartin Sperl 394dc87c98eSGrant Likely device_initialize(&spi->dev); 395dc87c98eSGrant Likely return spi; 396dc87c98eSGrant Likely } 397dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_alloc_device); 398dc87c98eSGrant Likely 399e13ac47bSJarkko Nikula static void spi_dev_set_name(struct spi_device *spi) 400e13ac47bSJarkko Nikula { 401e13ac47bSJarkko Nikula struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 402e13ac47bSJarkko Nikula 403e13ac47bSJarkko Nikula if (adev) { 404e13ac47bSJarkko Nikula dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 405e13ac47bSJarkko Nikula return; 406e13ac47bSJarkko Nikula } 407e13ac47bSJarkko Nikula 408e13ac47bSJarkko Nikula dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 409e13ac47bSJarkko Nikula spi->chip_select); 410e13ac47bSJarkko Nikula } 411e13ac47bSJarkko Nikula 412b6fb8d3aSMika Westerberg static int spi_dev_check(struct device *dev, void *data) 413b6fb8d3aSMika Westerberg { 414b6fb8d3aSMika Westerberg struct spi_device *spi = to_spi_device(dev); 415b6fb8d3aSMika Westerberg struct 
spi_device *new_spi = data; 416b6fb8d3aSMika Westerberg 417b6fb8d3aSMika Westerberg if (spi->master == new_spi->master && 418b6fb8d3aSMika Westerberg spi->chip_select == new_spi->chip_select) 419b6fb8d3aSMika Westerberg return -EBUSY; 420b6fb8d3aSMika Westerberg return 0; 421b6fb8d3aSMika Westerberg } 422b6fb8d3aSMika Westerberg 423dc87c98eSGrant Likely /** 424dc87c98eSGrant Likely * spi_add_device - Add spi_device allocated with spi_alloc_device 425dc87c98eSGrant Likely * @spi: spi_device to register 426dc87c98eSGrant Likely * 427dc87c98eSGrant Likely * Companion function to spi_alloc_device. Devices allocated with 428dc87c98eSGrant Likely * spi_alloc_device can be added onto the spi bus with this function. 429dc87c98eSGrant Likely * 430e48880e0SDavid Brownell * Returns 0 on success; negative errno on failure 431dc87c98eSGrant Likely */ 432dc87c98eSGrant Likely int spi_add_device(struct spi_device *spi) 433dc87c98eSGrant Likely { 434e48880e0SDavid Brownell static DEFINE_MUTEX(spi_add_lock); 43574317984SJean-Christophe PLAGNIOL-VILLARD struct spi_master *master = spi->master; 43674317984SJean-Christophe PLAGNIOL-VILLARD struct device *dev = master->dev.parent; 437dc87c98eSGrant Likely int status; 438dc87c98eSGrant Likely 439dc87c98eSGrant Likely /* Chipselects are numbered 0..max; validate. 
*/ 44074317984SJean-Christophe PLAGNIOL-VILLARD if (spi->chip_select >= master->num_chipselect) { 441dc87c98eSGrant Likely dev_err(dev, "cs%d >= max %d\n", 442dc87c98eSGrant Likely spi->chip_select, 44374317984SJean-Christophe PLAGNIOL-VILLARD master->num_chipselect); 444dc87c98eSGrant Likely return -EINVAL; 445dc87c98eSGrant Likely } 446dc87c98eSGrant Likely 447dc87c98eSGrant Likely /* Set the bus ID string */ 448e13ac47bSJarkko Nikula spi_dev_set_name(spi); 449e48880e0SDavid Brownell 450e48880e0SDavid Brownell /* We need to make sure there's no other device with this 451e48880e0SDavid Brownell * chipselect **BEFORE** we call setup(), else we'll trash 452e48880e0SDavid Brownell * its configuration. Lock against concurrent add() calls. 453e48880e0SDavid Brownell */ 454e48880e0SDavid Brownell mutex_lock(&spi_add_lock); 455e48880e0SDavid Brownell 456b6fb8d3aSMika Westerberg status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 457b6fb8d3aSMika Westerberg if (status) { 458e48880e0SDavid Brownell dev_err(dev, "chipselect %d already in use\n", 459e48880e0SDavid Brownell spi->chip_select); 460e48880e0SDavid Brownell goto done; 461e48880e0SDavid Brownell } 462e48880e0SDavid Brownell 46374317984SJean-Christophe PLAGNIOL-VILLARD if (master->cs_gpios) 46474317984SJean-Christophe PLAGNIOL-VILLARD spi->cs_gpio = master->cs_gpios[spi->chip_select]; 46574317984SJean-Christophe PLAGNIOL-VILLARD 466e48880e0SDavid Brownell /* Drivers may modify this initial i/o setup, but will 467e48880e0SDavid Brownell * normally rely on the device being setup. Devices 468e48880e0SDavid Brownell * using SPI_CS_HIGH can't coexist well otherwise... 
469e48880e0SDavid Brownell */ 4707d077197SDavid Brownell status = spi_setup(spi); 471dc87c98eSGrant Likely if (status < 0) { 472eb288a1fSLinus Walleij dev_err(dev, "can't setup %s, status %d\n", 473eb288a1fSLinus Walleij dev_name(&spi->dev), status); 474e48880e0SDavid Brownell goto done; 475dc87c98eSGrant Likely } 476dc87c98eSGrant Likely 477e48880e0SDavid Brownell /* Device may be bound to an active driver when this returns */ 478dc87c98eSGrant Likely status = device_add(&spi->dev); 479e48880e0SDavid Brownell if (status < 0) 480eb288a1fSLinus Walleij dev_err(dev, "can't add %s, status %d\n", 481eb288a1fSLinus Walleij dev_name(&spi->dev), status); 482e48880e0SDavid Brownell else 48335f74fcaSKay Sievers dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 484e48880e0SDavid Brownell 485e48880e0SDavid Brownell done: 486e48880e0SDavid Brownell mutex_unlock(&spi_add_lock); 487e48880e0SDavid Brownell return status; 488dc87c98eSGrant Likely } 489dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_add_device); 4908ae12a0dSDavid Brownell 49133e34dc6SDavid Brownell /** 49233e34dc6SDavid Brownell * spi_new_device - instantiate one new SPI device 49333e34dc6SDavid Brownell * @master: Controller to which device is connected 49433e34dc6SDavid Brownell * @chip: Describes the SPI device 49533e34dc6SDavid Brownell * Context: can sleep 49633e34dc6SDavid Brownell * 49733e34dc6SDavid Brownell * On typical mainboards, this is purely internal; and it's not needed 4988ae12a0dSDavid Brownell * after board init creates the hard-wired devices. Some development 4998ae12a0dSDavid Brownell * platforms may not be able to use spi_register_board_info though, and 5008ae12a0dSDavid Brownell * this is exported so that for example a USB or parport based adapter 5018ae12a0dSDavid Brownell * driver could add devices (which it would learn about out-of-band). 502082c8cb4SDavid Brownell * 503082c8cb4SDavid Brownell * Returns the new device, or NULL. 
5048ae12a0dSDavid Brownell */ 505e9d5a461SAdrian Bunk struct spi_device *spi_new_device(struct spi_master *master, 506e9d5a461SAdrian Bunk struct spi_board_info *chip) 5078ae12a0dSDavid Brownell { 5088ae12a0dSDavid Brownell struct spi_device *proxy; 5098ae12a0dSDavid Brownell int status; 5108ae12a0dSDavid Brownell 511082c8cb4SDavid Brownell /* NOTE: caller did any chip->bus_num checks necessary. 512082c8cb4SDavid Brownell * 513082c8cb4SDavid Brownell * Also, unless we change the return value convention to use 514082c8cb4SDavid Brownell * error-or-pointer (not NULL-or-pointer), troubleshootability 515082c8cb4SDavid Brownell * suggests syslogged diagnostics are best here (ugh). 516082c8cb4SDavid Brownell */ 517082c8cb4SDavid Brownell 518dc87c98eSGrant Likely proxy = spi_alloc_device(master); 519dc87c98eSGrant Likely if (!proxy) 5208ae12a0dSDavid Brownell return NULL; 5218ae12a0dSDavid Brownell 522102eb975SGrant Likely WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 523102eb975SGrant Likely 5248ae12a0dSDavid Brownell proxy->chip_select = chip->chip_select; 5258ae12a0dSDavid Brownell proxy->max_speed_hz = chip->max_speed_hz; 526980a01c9SDavid Brownell proxy->mode = chip->mode; 5278ae12a0dSDavid Brownell proxy->irq = chip->irq; 528102eb975SGrant Likely strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 5298ae12a0dSDavid Brownell proxy->dev.platform_data = (void *) chip->platform_data; 5308ae12a0dSDavid Brownell proxy->controller_data = chip->controller_data; 5318ae12a0dSDavid Brownell proxy->controller_state = NULL; 5328ae12a0dSDavid Brownell 533dc87c98eSGrant Likely status = spi_add_device(proxy); 5348ae12a0dSDavid Brownell if (status < 0) { 535dc87c98eSGrant Likely spi_dev_put(proxy); 5368ae12a0dSDavid Brownell return NULL; 5378ae12a0dSDavid Brownell } 538dc87c98eSGrant Likely 539dc87c98eSGrant Likely return proxy; 540dc87c98eSGrant Likely } 5418ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_new_device); 5428ae12a0dSDavid Brownell 
5432b9603a0SFeng Tang static void spi_match_master_to_boardinfo(struct spi_master *master, 5442b9603a0SFeng Tang struct spi_board_info *bi) 5452b9603a0SFeng Tang { 5462b9603a0SFeng Tang struct spi_device *dev; 5472b9603a0SFeng Tang 5482b9603a0SFeng Tang if (master->bus_num != bi->bus_num) 5492b9603a0SFeng Tang return; 5502b9603a0SFeng Tang 5512b9603a0SFeng Tang dev = spi_new_device(master, bi); 5522b9603a0SFeng Tang if (!dev) 5532b9603a0SFeng Tang dev_err(master->dev.parent, "can't create new device for %s\n", 5542b9603a0SFeng Tang bi->modalias); 5552b9603a0SFeng Tang } 5562b9603a0SFeng Tang 55733e34dc6SDavid Brownell /** 55833e34dc6SDavid Brownell * spi_register_board_info - register SPI devices for a given board 55933e34dc6SDavid Brownell * @info: array of chip descriptors 56033e34dc6SDavid Brownell * @n: how many descriptors are provided 56133e34dc6SDavid Brownell * Context: can sleep 56233e34dc6SDavid Brownell * 5638ae12a0dSDavid Brownell * Board-specific early init code calls this (probably during arch_initcall) 5648ae12a0dSDavid Brownell * with segments of the SPI device table. Any device nodes are created later, 5658ae12a0dSDavid Brownell * after the relevant parent SPI controller (bus_num) is defined. We keep 5668ae12a0dSDavid Brownell * this table of devices forever, so that reloading a controller driver will 5678ae12a0dSDavid Brownell * not make Linux forget about these hard-wired devices. 5688ae12a0dSDavid Brownell * 5698ae12a0dSDavid Brownell * Other code can also call this, e.g. a particular add-on board might provide 5708ae12a0dSDavid Brownell * SPI devices through its expansion connector, so code initializing that board 5718ae12a0dSDavid Brownell * would naturally declare its SPI devices. 5728ae12a0dSDavid Brownell * 5738ae12a0dSDavid Brownell * The board info passed can safely be __initdata ... but be careful of 5748ae12a0dSDavid Brownell * any embedded pointers (platform_data, etc), they're copied as-is. 
5758ae12a0dSDavid Brownell */ 576fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n) 5778ae12a0dSDavid Brownell { 5788ae12a0dSDavid Brownell struct boardinfo *bi; 5792b9603a0SFeng Tang int i; 5808ae12a0dSDavid Brownell 581c7908a37SXiubo Li if (!n) 582c7908a37SXiubo Li return -EINVAL; 583c7908a37SXiubo Li 5842b9603a0SFeng Tang bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 5858ae12a0dSDavid Brownell if (!bi) 5868ae12a0dSDavid Brownell return -ENOMEM; 5878ae12a0dSDavid Brownell 5882b9603a0SFeng Tang for (i = 0; i < n; i++, bi++, info++) { 5892b9603a0SFeng Tang struct spi_master *master; 5902b9603a0SFeng Tang 5912b9603a0SFeng Tang memcpy(&bi->board_info, info, sizeof(*info)); 59294040828SMatthias Kaehlcke mutex_lock(&board_lock); 5938ae12a0dSDavid Brownell list_add_tail(&bi->list, &board_list); 5942b9603a0SFeng Tang list_for_each_entry(master, &spi_master_list, list) 5952b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 59694040828SMatthias Kaehlcke mutex_unlock(&board_lock); 5972b9603a0SFeng Tang } 5982b9603a0SFeng Tang 5998ae12a0dSDavid Brownell return 0; 6008ae12a0dSDavid Brownell } 6018ae12a0dSDavid Brownell 6028ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 6038ae12a0dSDavid Brownell 604b158935fSMark Brown static void spi_set_cs(struct spi_device *spi, bool enable) 605b158935fSMark Brown { 606b158935fSMark Brown if (spi->mode & SPI_CS_HIGH) 607b158935fSMark Brown enable = !enable; 608b158935fSMark Brown 609*243f07beSAndy Shevchenko if (gpio_is_valid(spi->cs_gpio)) 610b158935fSMark Brown gpio_set_value(spi->cs_gpio, !enable); 611b158935fSMark Brown else if (spi->master->set_cs) 612b158935fSMark Brown spi->master->set_cs(spi, !enable); 613b158935fSMark Brown } 614b158935fSMark Brown 6152de440f5SGeert Uytterhoeven #ifdef CONFIG_HAS_DMA 6166ad45a27SMark Brown static int spi_map_buf(struct spi_master *master, struct device *dev, 6176ad45a27SMark Brown 
struct sg_table *sgt, void *buf, size_t len, 6186ad45a27SMark Brown enum dma_data_direction dir) 6196ad45a27SMark Brown { 6206ad45a27SMark Brown const bool vmalloced_buf = is_vmalloc_addr(buf); 62165598c13SAndrew Gabbasov int desc_len; 62265598c13SAndrew Gabbasov int sgs; 6236ad45a27SMark Brown struct page *vm_page; 6246ad45a27SMark Brown void *sg_buf; 6256ad45a27SMark Brown size_t min; 6266ad45a27SMark Brown int i, ret; 6276ad45a27SMark Brown 62865598c13SAndrew Gabbasov if (vmalloced_buf) { 62965598c13SAndrew Gabbasov desc_len = PAGE_SIZE; 63065598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 63165598c13SAndrew Gabbasov } else { 63265598c13SAndrew Gabbasov desc_len = master->max_dma_len; 63365598c13SAndrew Gabbasov sgs = DIV_ROUND_UP(len, desc_len); 63465598c13SAndrew Gabbasov } 63565598c13SAndrew Gabbasov 6366ad45a27SMark Brown ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 6376ad45a27SMark Brown if (ret != 0) 6386ad45a27SMark Brown return ret; 6396ad45a27SMark Brown 6406ad45a27SMark Brown for (i = 0; i < sgs; i++) { 6416ad45a27SMark Brown 6426ad45a27SMark Brown if (vmalloced_buf) { 64365598c13SAndrew Gabbasov min = min_t(size_t, 64465598c13SAndrew Gabbasov len, desc_len - offset_in_page(buf)); 6456ad45a27SMark Brown vm_page = vmalloc_to_page(buf); 6466ad45a27SMark Brown if (!vm_page) { 6476ad45a27SMark Brown sg_free_table(sgt); 6486ad45a27SMark Brown return -ENOMEM; 6496ad45a27SMark Brown } 650c1aefbddSCharles Keepax sg_set_page(&sgt->sgl[i], vm_page, 651c1aefbddSCharles Keepax min, offset_in_page(buf)); 6526ad45a27SMark Brown } else { 65365598c13SAndrew Gabbasov min = min_t(size_t, len, desc_len); 6546ad45a27SMark Brown sg_buf = buf; 655c1aefbddSCharles Keepax sg_set_buf(&sgt->sgl[i], sg_buf, min); 6566ad45a27SMark Brown } 6576ad45a27SMark Brown 6586ad45a27SMark Brown 6596ad45a27SMark Brown buf += min; 6606ad45a27SMark Brown len -= min; 6616ad45a27SMark Brown } 6626ad45a27SMark Brown 6636ad45a27SMark Brown ret = dma_map_sg(dev, 
sgt->sgl, sgt->nents, dir); 66489e4b66aSGeert Uytterhoeven if (!ret) 66589e4b66aSGeert Uytterhoeven ret = -ENOMEM; 6666ad45a27SMark Brown if (ret < 0) { 6676ad45a27SMark Brown sg_free_table(sgt); 6686ad45a27SMark Brown return ret; 6696ad45a27SMark Brown } 6706ad45a27SMark Brown 6716ad45a27SMark Brown sgt->nents = ret; 6726ad45a27SMark Brown 6736ad45a27SMark Brown return 0; 6746ad45a27SMark Brown } 6756ad45a27SMark Brown 6766ad45a27SMark Brown static void spi_unmap_buf(struct spi_master *master, struct device *dev, 6776ad45a27SMark Brown struct sg_table *sgt, enum dma_data_direction dir) 6786ad45a27SMark Brown { 6796ad45a27SMark Brown if (sgt->orig_nents) { 6806ad45a27SMark Brown dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 6816ad45a27SMark Brown sg_free_table(sgt); 6826ad45a27SMark Brown } 6836ad45a27SMark Brown } 6846ad45a27SMark Brown 6852de440f5SGeert Uytterhoeven static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 68699adef31SMark Brown { 68799adef31SMark Brown struct device *tx_dev, *rx_dev; 68899adef31SMark Brown struct spi_transfer *xfer; 6896ad45a27SMark Brown int ret; 6903a2eba9bSMark Brown 6916ad45a27SMark Brown if (!master->can_dma) 69299adef31SMark Brown return 0; 69399adef31SMark Brown 694c37f45b5SLeilk Liu if (master->dma_tx) 6953fc25421SGeert Uytterhoeven tx_dev = master->dma_tx->device->dev; 696c37f45b5SLeilk Liu else 697c37f45b5SLeilk Liu tx_dev = &master->dev; 698c37f45b5SLeilk Liu 699c37f45b5SLeilk Liu if (master->dma_rx) 7003fc25421SGeert Uytterhoeven rx_dev = master->dma_rx->device->dev; 701c37f45b5SLeilk Liu else 702c37f45b5SLeilk Liu rx_dev = &master->dev; 70399adef31SMark Brown 70499adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 70599adef31SMark Brown if (!master->can_dma(master, msg->spi, xfer)) 70699adef31SMark Brown continue; 70799adef31SMark Brown 70899adef31SMark Brown if (xfer->tx_buf != NULL) { 7096ad45a27SMark Brown ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 
7106ad45a27SMark Brown (void *)xfer->tx_buf, xfer->len, 71199adef31SMark Brown DMA_TO_DEVICE); 7126ad45a27SMark Brown if (ret != 0) 7136ad45a27SMark Brown return ret; 71499adef31SMark Brown } 71599adef31SMark Brown 71699adef31SMark Brown if (xfer->rx_buf != NULL) { 7176ad45a27SMark Brown ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 71899adef31SMark Brown xfer->rx_buf, xfer->len, 71999adef31SMark Brown DMA_FROM_DEVICE); 7206ad45a27SMark Brown if (ret != 0) { 7216ad45a27SMark Brown spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 7226ad45a27SMark Brown DMA_TO_DEVICE); 7236ad45a27SMark Brown return ret; 72499adef31SMark Brown } 72599adef31SMark Brown } 72699adef31SMark Brown } 72799adef31SMark Brown 72899adef31SMark Brown master->cur_msg_mapped = true; 72999adef31SMark Brown 73099adef31SMark Brown return 0; 73199adef31SMark Brown } 73299adef31SMark Brown 7334b786458SMartin Sperl static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 73499adef31SMark Brown { 73599adef31SMark Brown struct spi_transfer *xfer; 73699adef31SMark Brown struct device *tx_dev, *rx_dev; 73799adef31SMark Brown 7386ad45a27SMark Brown if (!master->cur_msg_mapped || !master->can_dma) 73999adef31SMark Brown return 0; 74099adef31SMark Brown 741c37f45b5SLeilk Liu if (master->dma_tx) 7423fc25421SGeert Uytterhoeven tx_dev = master->dma_tx->device->dev; 743c37f45b5SLeilk Liu else 744c37f45b5SLeilk Liu tx_dev = &master->dev; 745c37f45b5SLeilk Liu 746c37f45b5SLeilk Liu if (master->dma_rx) 7473fc25421SGeert Uytterhoeven rx_dev = master->dma_rx->device->dev; 748c37f45b5SLeilk Liu else 749c37f45b5SLeilk Liu rx_dev = &master->dev; 75099adef31SMark Brown 75199adef31SMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 75299adef31SMark Brown if (!master->can_dma(master, msg->spi, xfer)) 75399adef31SMark Brown continue; 75499adef31SMark Brown 7556ad45a27SMark Brown spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 7566ad45a27SMark Brown spi_unmap_buf(master, 
tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 75799adef31SMark Brown } 75899adef31SMark Brown 75999adef31SMark Brown return 0; 76099adef31SMark Brown } 7612de440f5SGeert Uytterhoeven #else /* !CONFIG_HAS_DMA */ 7622de440f5SGeert Uytterhoeven static inline int __spi_map_msg(struct spi_master *master, 7632de440f5SGeert Uytterhoeven struct spi_message *msg) 7642de440f5SGeert Uytterhoeven { 7652de440f5SGeert Uytterhoeven return 0; 7662de440f5SGeert Uytterhoeven } 7672de440f5SGeert Uytterhoeven 7684b786458SMartin Sperl static inline int __spi_unmap_msg(struct spi_master *master, 7692de440f5SGeert Uytterhoeven struct spi_message *msg) 7702de440f5SGeert Uytterhoeven { 7712de440f5SGeert Uytterhoeven return 0; 7722de440f5SGeert Uytterhoeven } 7732de440f5SGeert Uytterhoeven #endif /* !CONFIG_HAS_DMA */ 7742de440f5SGeert Uytterhoeven 7754b786458SMartin Sperl static inline int spi_unmap_msg(struct spi_master *master, 7764b786458SMartin Sperl struct spi_message *msg) 7774b786458SMartin Sperl { 7784b786458SMartin Sperl struct spi_transfer *xfer; 7794b786458SMartin Sperl 7804b786458SMartin Sperl list_for_each_entry(xfer, &msg->transfers, transfer_list) { 7814b786458SMartin Sperl /* 7824b786458SMartin Sperl * Restore the original value of tx_buf or rx_buf if they are 7834b786458SMartin Sperl * NULL. 
7844b786458SMartin Sperl */ 7854b786458SMartin Sperl if (xfer->tx_buf == master->dummy_tx) 7864b786458SMartin Sperl xfer->tx_buf = NULL; 7874b786458SMartin Sperl if (xfer->rx_buf == master->dummy_rx) 7884b786458SMartin Sperl xfer->rx_buf = NULL; 7894b786458SMartin Sperl } 7904b786458SMartin Sperl 7914b786458SMartin Sperl return __spi_unmap_msg(master, msg); 7924b786458SMartin Sperl } 7934b786458SMartin Sperl 7942de440f5SGeert Uytterhoeven static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 7952de440f5SGeert Uytterhoeven { 7962de440f5SGeert Uytterhoeven struct spi_transfer *xfer; 7972de440f5SGeert Uytterhoeven void *tmp; 7982de440f5SGeert Uytterhoeven unsigned int max_tx, max_rx; 7992de440f5SGeert Uytterhoeven 8002de440f5SGeert Uytterhoeven if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 8012de440f5SGeert Uytterhoeven max_tx = 0; 8022de440f5SGeert Uytterhoeven max_rx = 0; 8032de440f5SGeert Uytterhoeven 8042de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers, transfer_list) { 8052de440f5SGeert Uytterhoeven if ((master->flags & SPI_MASTER_MUST_TX) && 8062de440f5SGeert Uytterhoeven !xfer->tx_buf) 8072de440f5SGeert Uytterhoeven max_tx = max(xfer->len, max_tx); 8082de440f5SGeert Uytterhoeven if ((master->flags & SPI_MASTER_MUST_RX) && 8092de440f5SGeert Uytterhoeven !xfer->rx_buf) 8102de440f5SGeert Uytterhoeven max_rx = max(xfer->len, max_rx); 8112de440f5SGeert Uytterhoeven } 8122de440f5SGeert Uytterhoeven 8132de440f5SGeert Uytterhoeven if (max_tx) { 8142de440f5SGeert Uytterhoeven tmp = krealloc(master->dummy_tx, max_tx, 8152de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA); 8162de440f5SGeert Uytterhoeven if (!tmp) 8172de440f5SGeert Uytterhoeven return -ENOMEM; 8182de440f5SGeert Uytterhoeven master->dummy_tx = tmp; 8192de440f5SGeert Uytterhoeven memset(tmp, 0, max_tx); 8202de440f5SGeert Uytterhoeven } 8212de440f5SGeert Uytterhoeven 8222de440f5SGeert Uytterhoeven if (max_rx) { 8232de440f5SGeert Uytterhoeven tmp = 
krealloc(master->dummy_rx, max_rx, 8242de440f5SGeert Uytterhoeven GFP_KERNEL | GFP_DMA); 8252de440f5SGeert Uytterhoeven if (!tmp) 8262de440f5SGeert Uytterhoeven return -ENOMEM; 8272de440f5SGeert Uytterhoeven master->dummy_rx = tmp; 8282de440f5SGeert Uytterhoeven } 8292de440f5SGeert Uytterhoeven 8302de440f5SGeert Uytterhoeven if (max_tx || max_rx) { 8312de440f5SGeert Uytterhoeven list_for_each_entry(xfer, &msg->transfers, 8322de440f5SGeert Uytterhoeven transfer_list) { 8332de440f5SGeert Uytterhoeven if (!xfer->tx_buf) 8342de440f5SGeert Uytterhoeven xfer->tx_buf = master->dummy_tx; 8352de440f5SGeert Uytterhoeven if (!xfer->rx_buf) 8362de440f5SGeert Uytterhoeven xfer->rx_buf = master->dummy_rx; 8372de440f5SGeert Uytterhoeven } 8382de440f5SGeert Uytterhoeven } 8392de440f5SGeert Uytterhoeven } 8402de440f5SGeert Uytterhoeven 8412de440f5SGeert Uytterhoeven return __spi_map_msg(master, msg); 8422de440f5SGeert Uytterhoeven } 84399adef31SMark Brown 844b158935fSMark Brown /* 845b158935fSMark Brown * spi_transfer_one_message - Default implementation of transfer_one_message() 846b158935fSMark Brown * 847b158935fSMark Brown * This is a standard implementation of transfer_one_message() for 848b158935fSMark Brown * drivers which impelment a transfer_one() operation. It provides 849b158935fSMark Brown * standard handling of delays and chip select management. 
850b158935fSMark Brown */ 851b158935fSMark Brown static int spi_transfer_one_message(struct spi_master *master, 852b158935fSMark Brown struct spi_message *msg) 853b158935fSMark Brown { 854b158935fSMark Brown struct spi_transfer *xfer; 855b158935fSMark Brown bool keep_cs = false; 856b158935fSMark Brown int ret = 0; 857682a71b2SNicholas Mc Guire unsigned long ms = 1; 858eca2ebc7SMartin Sperl struct spi_statistics *statm = &master->statistics; 859eca2ebc7SMartin Sperl struct spi_statistics *stats = &msg->spi->statistics; 860b158935fSMark Brown 861b158935fSMark Brown spi_set_cs(msg->spi, true); 862b158935fSMark Brown 863eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 864eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 865eca2ebc7SMartin Sperl 866b158935fSMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 867b158935fSMark Brown trace_spi_transfer_start(msg, xfer); 868b158935fSMark Brown 869eca2ebc7SMartin Sperl spi_statistics_add_transfer_stats(statm, xfer, master); 870eca2ebc7SMartin Sperl spi_statistics_add_transfer_stats(stats, xfer, master); 871eca2ebc7SMartin Sperl 87238ec10f6SMark Brown if (xfer->tx_buf || xfer->rx_buf) { 87316735d02SWolfram Sang reinit_completion(&master->xfer_completion); 874b158935fSMark Brown 875b158935fSMark Brown ret = master->transfer_one(master, msg->spi, xfer); 876b158935fSMark Brown if (ret < 0) { 877eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, 878eca2ebc7SMartin Sperl errors); 879eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, 880eca2ebc7SMartin Sperl errors); 881b158935fSMark Brown dev_err(&msg->spi->dev, 882b158935fSMark Brown "SPI transfer failed: %d\n", ret); 883b158935fSMark Brown goto out; 884b158935fSMark Brown } 885b158935fSMark Brown 88613a42798SAxel Lin if (ret > 0) { 88713a42798SAxel Lin ret = 0; 88816a0ce4eSMark Brown ms = xfer->len * 8 * 1000 / xfer->speed_hz; 889eee668a9SHarini Katakam ms += ms + 100; /* some tolerance */ 
89016a0ce4eSMark Brown 89116a0ce4eSMark Brown ms = wait_for_completion_timeout(&master->xfer_completion, 89216a0ce4eSMark Brown msecs_to_jiffies(ms)); 89316a0ce4eSMark Brown } 89416a0ce4eSMark Brown 89516a0ce4eSMark Brown if (ms == 0) { 896eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(statm, 897eca2ebc7SMartin Sperl timedout); 898eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(stats, 899eca2ebc7SMartin Sperl timedout); 90038ec10f6SMark Brown dev_err(&msg->spi->dev, 90138ec10f6SMark Brown "SPI transfer timed out\n"); 90216a0ce4eSMark Brown msg->status = -ETIMEDOUT; 90313a42798SAxel Lin } 90438ec10f6SMark Brown } else { 90538ec10f6SMark Brown if (xfer->len) 90638ec10f6SMark Brown dev_err(&msg->spi->dev, 90738ec10f6SMark Brown "Bufferless transfer has length %u\n", 90838ec10f6SMark Brown xfer->len); 90938ec10f6SMark Brown } 910b158935fSMark Brown 911b158935fSMark Brown trace_spi_transfer_stop(msg, xfer); 912b158935fSMark Brown 913b158935fSMark Brown if (msg->status != -EINPROGRESS) 914b158935fSMark Brown goto out; 915b158935fSMark Brown 916b158935fSMark Brown if (xfer->delay_usecs) 917b158935fSMark Brown udelay(xfer->delay_usecs); 918b158935fSMark Brown 919b158935fSMark Brown if (xfer->cs_change) { 920b158935fSMark Brown if (list_is_last(&xfer->transfer_list, 921b158935fSMark Brown &msg->transfers)) { 922b158935fSMark Brown keep_cs = true; 923b158935fSMark Brown } else { 9240b73aa63SMark Brown spi_set_cs(msg->spi, false); 9250b73aa63SMark Brown udelay(10); 9260b73aa63SMark Brown spi_set_cs(msg->spi, true); 927b158935fSMark Brown } 928b158935fSMark Brown } 929b158935fSMark Brown 930b158935fSMark Brown msg->actual_length += xfer->len; 931b158935fSMark Brown } 932b158935fSMark Brown 933b158935fSMark Brown out: 934b158935fSMark Brown if (ret != 0 || !keep_cs) 935b158935fSMark Brown spi_set_cs(msg->spi, false); 936b158935fSMark Brown 937b158935fSMark Brown if (msg->status == -EINPROGRESS) 938b158935fSMark Brown msg->status = ret; 939b158935fSMark Brown 
940ff61eb42SGeert Uytterhoeven if (msg->status && master->handle_err) 941b716c4ffSAndy Shevchenko master->handle_err(master, msg); 942b716c4ffSAndy Shevchenko 943b158935fSMark Brown spi_finalize_current_message(master); 944b158935fSMark Brown 945b158935fSMark Brown return ret; 946b158935fSMark Brown } 947b158935fSMark Brown 948b158935fSMark Brown /** 949b158935fSMark Brown * spi_finalize_current_transfer - report completion of a transfer 9502c675689SThierry Reding * @master: the master reporting completion 951b158935fSMark Brown * 952b158935fSMark Brown * Called by SPI drivers using the core transfer_one_message() 953b158935fSMark Brown * implementation to notify it that the current interrupt driven 9549e8f4882SGeert Uytterhoeven * transfer has finished and the next one may be scheduled. 955b158935fSMark Brown */ 956b158935fSMark Brown void spi_finalize_current_transfer(struct spi_master *master) 957b158935fSMark Brown { 958b158935fSMark Brown complete(&master->xfer_completion); 959b158935fSMark Brown } 960b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 961b158935fSMark Brown 962ffbbdd21SLinus Walleij /** 963fc9e0f71SMark Brown * __spi_pump_messages - function which processes spi message queue 964fc9e0f71SMark Brown * @master: master to process queue for 965fc9e0f71SMark Brown * @in_kthread: true if we are in the context of the message pump thread 966ffbbdd21SLinus Walleij * 967ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that 968ffbbdd21SLinus Walleij * needs processing and if so call out to the driver to initialize hardware 969ffbbdd21SLinus Walleij * and transfer each message. 970ffbbdd21SLinus Walleij * 9710461a414SMark Brown * Note that it is called both from the kthread itself and also from 9720461a414SMark Brown * inside spi_sync(); the queue extraction handling at the top of the 9730461a414SMark Brown * function should deal with this safely. 
974ffbbdd21SLinus Walleij */ 975fc9e0f71SMark Brown static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 976ffbbdd21SLinus Walleij { 977ffbbdd21SLinus Walleij unsigned long flags; 978ffbbdd21SLinus Walleij bool was_busy = false; 979ffbbdd21SLinus Walleij int ret; 980ffbbdd21SLinus Walleij 981983aee5dSMark Brown /* Lock queue */ 982ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 983983aee5dSMark Brown 984983aee5dSMark Brown /* Make sure we are not already running a message */ 985983aee5dSMark Brown if (master->cur_msg) { 986983aee5dSMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 987983aee5dSMark Brown return; 988983aee5dSMark Brown } 989983aee5dSMark Brown 9900461a414SMark Brown /* If another context is idling the device then defer */ 9910461a414SMark Brown if (master->idling) { 9920461a414SMark Brown queue_kthread_work(&master->kworker, &master->pump_messages); 9930461a414SMark Brown spin_unlock_irqrestore(&master->queue_lock, flags); 9940461a414SMark Brown return; 9950461a414SMark Brown } 9960461a414SMark Brown 997983aee5dSMark Brown /* Check if the queue is idle */ 998ffbbdd21SLinus Walleij if (list_empty(&master->queue) || !master->running) { 999b0b36b86SBryan Freed if (!master->busy) { 10009af4acc0SDan Carpenter spin_unlock_irqrestore(&master->queue_lock, flags); 1001ffbbdd21SLinus Walleij return; 1002ffbbdd21SLinus Walleij } 1003fc9e0f71SMark Brown 1004fc9e0f71SMark Brown /* Only do teardown in the thread */ 1005fc9e0f71SMark Brown if (!in_kthread) { 1006fc9e0f71SMark Brown queue_kthread_work(&master->kworker, 1007fc9e0f71SMark Brown &master->pump_messages); 1008ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1009fc9e0f71SMark Brown return; 1010fc9e0f71SMark Brown } 1011fc9e0f71SMark Brown 1012ffbbdd21SLinus Walleij master->busy = false; 10130461a414SMark Brown master->idling = true; 1014ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 
10150461a414SMark Brown 10163a2eba9bSMark Brown kfree(master->dummy_rx); 10173a2eba9bSMark Brown master->dummy_rx = NULL; 10183a2eba9bSMark Brown kfree(master->dummy_tx); 10193a2eba9bSMark Brown master->dummy_tx = NULL; 1020b0b36b86SBryan Freed if (master->unprepare_transfer_hardware && 1021b0b36b86SBryan Freed master->unprepare_transfer_hardware(master)) 1022b0b36b86SBryan Freed dev_err(&master->dev, 1023b0b36b86SBryan Freed "failed to unprepare transfer hardware\n"); 102449834de2SMark Brown if (master->auto_runtime_pm) { 102549834de2SMark Brown pm_runtime_mark_last_busy(master->dev.parent); 102649834de2SMark Brown pm_runtime_put_autosuspend(master->dev.parent); 102749834de2SMark Brown } 102856ec1978SMark Brown trace_spi_master_idle(master); 1029ffbbdd21SLinus Walleij 10300461a414SMark Brown spin_lock_irqsave(&master->queue_lock, flags); 10310461a414SMark Brown master->idling = false; 1032ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1033ffbbdd21SLinus Walleij return; 1034ffbbdd21SLinus Walleij } 1035ffbbdd21SLinus Walleij 1036ffbbdd21SLinus Walleij /* Extract head of queue */ 1037ffbbdd21SLinus Walleij master->cur_msg = 1038a89e2d27SAxel Lin list_first_entry(&master->queue, struct spi_message, queue); 1039ffbbdd21SLinus Walleij 1040ffbbdd21SLinus Walleij list_del_init(&master->cur_msg->queue); 1041ffbbdd21SLinus Walleij if (master->busy) 1042ffbbdd21SLinus Walleij was_busy = true; 1043ffbbdd21SLinus Walleij else 1044ffbbdd21SLinus Walleij master->busy = true; 1045ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1046ffbbdd21SLinus Walleij 104749834de2SMark Brown if (!was_busy && master->auto_runtime_pm) { 104849834de2SMark Brown ret = pm_runtime_get_sync(master->dev.parent); 104949834de2SMark Brown if (ret < 0) { 105049834de2SMark Brown dev_err(&master->dev, "Failed to power device: %d\n", 105149834de2SMark Brown ret); 105249834de2SMark Brown return; 105349834de2SMark Brown } 105449834de2SMark Brown } 
105549834de2SMark Brown 105656ec1978SMark Brown if (!was_busy) 105756ec1978SMark Brown trace_spi_master_busy(master); 105856ec1978SMark Brown 10597dfd2bd7SShubhrajyoti D if (!was_busy && master->prepare_transfer_hardware) { 1060ffbbdd21SLinus Walleij ret = master->prepare_transfer_hardware(master); 1061ffbbdd21SLinus Walleij if (ret) { 1062ffbbdd21SLinus Walleij dev_err(&master->dev, 1063ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 106449834de2SMark Brown 106549834de2SMark Brown if (master->auto_runtime_pm) 106649834de2SMark Brown pm_runtime_put(master->dev.parent); 1067ffbbdd21SLinus Walleij return; 1068ffbbdd21SLinus Walleij } 1069ffbbdd21SLinus Walleij } 1070ffbbdd21SLinus Walleij 107156ec1978SMark Brown trace_spi_message_start(master->cur_msg); 107256ec1978SMark Brown 10732841a5fcSMark Brown if (master->prepare_message) { 10742841a5fcSMark Brown ret = master->prepare_message(master, master->cur_msg); 10752841a5fcSMark Brown if (ret) { 10762841a5fcSMark Brown dev_err(&master->dev, 10772841a5fcSMark Brown "failed to prepare message: %d\n", ret); 10782841a5fcSMark Brown master->cur_msg->status = ret; 10792841a5fcSMark Brown spi_finalize_current_message(master); 10802841a5fcSMark Brown return; 10812841a5fcSMark Brown } 10822841a5fcSMark Brown master->cur_msg_prepared = true; 10832841a5fcSMark Brown } 10842841a5fcSMark Brown 108599adef31SMark Brown ret = spi_map_msg(master, master->cur_msg); 108699adef31SMark Brown if (ret) { 108799adef31SMark Brown master->cur_msg->status = ret; 108899adef31SMark Brown spi_finalize_current_message(master); 108999adef31SMark Brown return; 109099adef31SMark Brown } 109199adef31SMark Brown 1092ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 1093ffbbdd21SLinus Walleij if (ret) { 1094ffbbdd21SLinus Walleij dev_err(&master->dev, 10951f802f82SGeert Uytterhoeven "failed to transfer one message from queue\n"); 1096ffbbdd21SLinus Walleij return; 1097ffbbdd21SLinus Walleij } 
1098ffbbdd21SLinus Walleij } 1099ffbbdd21SLinus Walleij 1100fc9e0f71SMark Brown /** 1101fc9e0f71SMark Brown * spi_pump_messages - kthread work function which processes spi message queue 1102fc9e0f71SMark Brown * @work: pointer to kthread work struct contained in the master struct 1103fc9e0f71SMark Brown */ 1104fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work) 1105fc9e0f71SMark Brown { 1106fc9e0f71SMark Brown struct spi_master *master = 1107fc9e0f71SMark Brown container_of(work, struct spi_master, pump_messages); 1108fc9e0f71SMark Brown 1109fc9e0f71SMark Brown __spi_pump_messages(master, true); 1110fc9e0f71SMark Brown } 1111fc9e0f71SMark Brown 1112ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master) 1113ffbbdd21SLinus Walleij { 1114ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1115ffbbdd21SLinus Walleij 1116ffbbdd21SLinus Walleij master->running = false; 1117ffbbdd21SLinus Walleij master->busy = false; 1118ffbbdd21SLinus Walleij 1119ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 1120ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 1121f170168bSKees Cook &master->kworker, "%s", 1122ffbbdd21SLinus Walleij dev_name(&master->dev)); 1123ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 1124ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 112598a8f5a0SJarkko Nikula return PTR_ERR(master->kworker_task); 1126ffbbdd21SLinus Walleij } 1127ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 1128ffbbdd21SLinus Walleij 1129ffbbdd21SLinus Walleij /* 1130ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 1131ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 1132ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 1133ffbbdd21SLinus Walleij * request and the 
scheduling of the message pump thread. Without this 1134ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 1135ffbbdd21SLinus Walleij */ 1136ffbbdd21SLinus Walleij if (master->rt) { 1137ffbbdd21SLinus Walleij dev_info(&master->dev, 1138ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 1139ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1140ffbbdd21SLinus Walleij } 1141ffbbdd21SLinus Walleij 1142ffbbdd21SLinus Walleij return 0; 1143ffbbdd21SLinus Walleij } 1144ffbbdd21SLinus Walleij 1145ffbbdd21SLinus Walleij /** 1146ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 1147ffbbdd21SLinus Walleij * messages 1148ffbbdd21SLinus Walleij * @master: the master to check for queued messages 1149ffbbdd21SLinus Walleij * 1150ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 1151ffbbdd21SLinus Walleij * this call. 1152ffbbdd21SLinus Walleij */ 1153ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1154ffbbdd21SLinus Walleij { 1155ffbbdd21SLinus Walleij struct spi_message *next; 1156ffbbdd21SLinus Walleij unsigned long flags; 1157ffbbdd21SLinus Walleij 1158ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 1159ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 11601cfd97f9SAxel Lin next = list_first_entry_or_null(&master->queue, struct spi_message, 11611cfd97f9SAxel Lin queue); 1162ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1163ffbbdd21SLinus Walleij 1164ffbbdd21SLinus Walleij return next; 1165ffbbdd21SLinus Walleij } 1166ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1167ffbbdd21SLinus Walleij 1168ffbbdd21SLinus Walleij /** 1169ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 1170ffbbdd21SLinus Walleij * @master: 
the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	/* clear cur_msg and kick the pump before completing to the caller */
	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

1209ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master) 1210ffbbdd21SLinus Walleij { 1211ffbbdd21SLinus Walleij unsigned long flags; 1212ffbbdd21SLinus Walleij 1213ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1214ffbbdd21SLinus Walleij 1215ffbbdd21SLinus Walleij if (master->running || master->busy) { 1216ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1217ffbbdd21SLinus Walleij return -EBUSY; 1218ffbbdd21SLinus Walleij } 1219ffbbdd21SLinus Walleij 1220ffbbdd21SLinus Walleij master->running = true; 1221ffbbdd21SLinus Walleij master->cur_msg = NULL; 1222ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1223ffbbdd21SLinus Walleij 1224ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1225ffbbdd21SLinus Walleij 1226ffbbdd21SLinus Walleij return 0; 1227ffbbdd21SLinus Walleij } 1228ffbbdd21SLinus Walleij 1229ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 1230ffbbdd21SLinus Walleij { 1231ffbbdd21SLinus Walleij unsigned long flags; 1232ffbbdd21SLinus Walleij unsigned limit = 500; 1233ffbbdd21SLinus Walleij int ret = 0; 1234ffbbdd21SLinus Walleij 1235ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1236ffbbdd21SLinus Walleij 1237ffbbdd21SLinus Walleij /* 1238ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 1239ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 1240ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 1241ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 
1242ffbbdd21SLinus Walleij */ 1243ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 1244ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1245f97b26b0SAxel Lin usleep_range(10000, 11000); 1246ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1247ffbbdd21SLinus Walleij } 1248ffbbdd21SLinus Walleij 1249ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 1250ffbbdd21SLinus Walleij ret = -EBUSY; 1251ffbbdd21SLinus Walleij else 1252ffbbdd21SLinus Walleij master->running = false; 1253ffbbdd21SLinus Walleij 1254ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1255ffbbdd21SLinus Walleij 1256ffbbdd21SLinus Walleij if (ret) { 1257ffbbdd21SLinus Walleij dev_warn(&master->dev, 1258ffbbdd21SLinus Walleij "could not stop message queue\n"); 1259ffbbdd21SLinus Walleij return ret; 1260ffbbdd21SLinus Walleij } 1261ffbbdd21SLinus Walleij return ret; 1262ffbbdd21SLinus Walleij } 1263ffbbdd21SLinus Walleij 1264ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 1265ffbbdd21SLinus Walleij { 1266ffbbdd21SLinus Walleij int ret; 1267ffbbdd21SLinus Walleij 1268ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1269ffbbdd21SLinus Walleij 1270ffbbdd21SLinus Walleij /* 1271ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 1272ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 1273ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 1274ffbbdd21SLinus Walleij * return anyway. 
1275ffbbdd21SLinus Walleij */ 1276ffbbdd21SLinus Walleij if (ret) { 1277ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 1278ffbbdd21SLinus Walleij return ret; 1279ffbbdd21SLinus Walleij } 1280ffbbdd21SLinus Walleij 1281ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 1282ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 1283ffbbdd21SLinus Walleij 1284ffbbdd21SLinus Walleij return 0; 1285ffbbdd21SLinus Walleij } 1286ffbbdd21SLinus Walleij 12870461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi, 12880461a414SMark Brown struct spi_message *msg, 12890461a414SMark Brown bool need_pump) 1290ffbbdd21SLinus Walleij { 1291ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 1292ffbbdd21SLinus Walleij unsigned long flags; 1293ffbbdd21SLinus Walleij 1294ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 1295ffbbdd21SLinus Walleij 1296ffbbdd21SLinus Walleij if (!master->running) { 1297ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1298ffbbdd21SLinus Walleij return -ESHUTDOWN; 1299ffbbdd21SLinus Walleij } 1300ffbbdd21SLinus Walleij msg->actual_length = 0; 1301ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 1302ffbbdd21SLinus Walleij 1303ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 13040461a414SMark Brown if (!master->busy && need_pump) 1305ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 1306ffbbdd21SLinus Walleij 1307ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 1308ffbbdd21SLinus Walleij return 0; 1309ffbbdd21SLinus Walleij } 1310ffbbdd21SLinus Walleij 13110461a414SMark Brown /** 13120461a414SMark Brown * spi_queued_transfer - transfer function for queued transfers 13130461a414SMark Brown * @spi: spi device which is requesting transfer 13140461a414SMark Brown * @msg: spi message which is to handled is queued to driver queue 13150461a414SMark Brown */ 
13160461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 13170461a414SMark Brown { 13180461a414SMark Brown return __spi_queued_transfer(spi, msg, true); 13190461a414SMark Brown } 13200461a414SMark Brown 1321ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 1322ffbbdd21SLinus Walleij { 1323ffbbdd21SLinus Walleij int ret; 1324ffbbdd21SLinus Walleij 1325ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 1326b158935fSMark Brown if (!master->transfer_one_message) 1327b158935fSMark Brown master->transfer_one_message = spi_transfer_one_message; 1328ffbbdd21SLinus Walleij 1329ffbbdd21SLinus Walleij /* Initialize and start queue */ 1330ffbbdd21SLinus Walleij ret = spi_init_queue(master); 1331ffbbdd21SLinus Walleij if (ret) { 1332ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 1333ffbbdd21SLinus Walleij goto err_init_queue; 1334ffbbdd21SLinus Walleij } 1335c3676d5cSMark Brown master->queued = true; 1336ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1337ffbbdd21SLinus Walleij if (ret) { 1338ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 1339ffbbdd21SLinus Walleij goto err_start_queue; 1340ffbbdd21SLinus Walleij } 1341ffbbdd21SLinus Walleij 1342ffbbdd21SLinus Walleij return 0; 1343ffbbdd21SLinus Walleij 1344ffbbdd21SLinus Walleij err_start_queue: 1345ffbbdd21SLinus Walleij spi_destroy_queue(master); 1346c3676d5cSMark Brown err_init_queue: 1347ffbbdd21SLinus Walleij return ret; 1348ffbbdd21SLinus Walleij } 1349ffbbdd21SLinus Walleij 1350ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 1351ffbbdd21SLinus Walleij 13527cb94361SAndreas Larsson #if defined(CONFIG_OF) 1353aff5e3f8SPantelis Antoniou static struct spi_device * 1354aff5e3f8SPantelis Antoniou of_register_spi_device(struct spi_master *master, struct device_node *nc) 1355d57a4282SGrant Likely { 1356d57a4282SGrant 
Likely struct spi_device *spi; 1357d57a4282SGrant Likely int rc; 135889da4293STrent Piepho u32 value; 1359d57a4282SGrant Likely 1360d57a4282SGrant Likely /* Alloc an spi_device */ 1361d57a4282SGrant Likely spi = spi_alloc_device(master); 1362d57a4282SGrant Likely if (!spi) { 1363d57a4282SGrant Likely dev_err(&master->dev, "spi_device alloc error for %s\n", 1364d57a4282SGrant Likely nc->full_name); 1365aff5e3f8SPantelis Antoniou rc = -ENOMEM; 1366aff5e3f8SPantelis Antoniou goto err_out; 1367d57a4282SGrant Likely } 1368d57a4282SGrant Likely 1369d57a4282SGrant Likely /* Select device driver */ 1370aff5e3f8SPantelis Antoniou rc = of_modalias_node(nc, spi->modalias, 1371aff5e3f8SPantelis Antoniou sizeof(spi->modalias)); 1372aff5e3f8SPantelis Antoniou if (rc < 0) { 1373d57a4282SGrant Likely dev_err(&master->dev, "cannot find modalias for %s\n", 1374d57a4282SGrant Likely nc->full_name); 1375aff5e3f8SPantelis Antoniou goto err_out; 1376d57a4282SGrant Likely } 1377d57a4282SGrant Likely 1378d57a4282SGrant Likely /* Device address */ 137989da4293STrent Piepho rc = of_property_read_u32(nc, "reg", &value); 138089da4293STrent Piepho if (rc) { 138189da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 138289da4293STrent Piepho nc->full_name, rc); 1383aff5e3f8SPantelis Antoniou goto err_out; 1384d57a4282SGrant Likely } 138589da4293STrent Piepho spi->chip_select = value; 1386d57a4282SGrant Likely 1387d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) 
*/ 1388d57a4282SGrant Likely if (of_find_property(nc, "spi-cpha", NULL)) 1389d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1390d57a4282SGrant Likely if (of_find_property(nc, "spi-cpol", NULL)) 1391d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1392d57a4282SGrant Likely if (of_find_property(nc, "spi-cs-high", NULL)) 1393d57a4282SGrant Likely spi->mode |= SPI_CS_HIGH; 1394c20151dfSLars-Peter Clausen if (of_find_property(nc, "spi-3wire", NULL)) 1395c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1396cd6339e6SZhao Qiang if (of_find_property(nc, "spi-lsb-first", NULL)) 1397cd6339e6SZhao Qiang spi->mode |= SPI_LSB_FIRST; 1398d57a4282SGrant Likely 1399f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 140089da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 140189da4293STrent Piepho switch (value) { 140289da4293STrent Piepho case 1: 1403f477b7fbSwangyuhang break; 140489da4293STrent Piepho case 2: 1405f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1406f477b7fbSwangyuhang break; 140789da4293STrent Piepho case 4: 1408f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1409f477b7fbSwangyuhang break; 1410f477b7fbSwangyuhang default: 141180874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1412a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 141389da4293STrent Piepho value); 141480874d8cSGeert Uytterhoeven break; 1415f477b7fbSwangyuhang } 1416a822e99cSMark Brown } 1417f477b7fbSwangyuhang 141889da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 141989da4293STrent Piepho switch (value) { 142089da4293STrent Piepho case 1: 1421f477b7fbSwangyuhang break; 142289da4293STrent Piepho case 2: 1423f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1424f477b7fbSwangyuhang break; 142589da4293STrent Piepho case 4: 1426f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1427f477b7fbSwangyuhang break; 1428f477b7fbSwangyuhang default: 142980874d8cSGeert Uytterhoeven dev_warn(&master->dev, 1430a110f93dSwangyuhang "spi-rx-bus-width %d not 
supported\n", 143189da4293STrent Piepho value); 143280874d8cSGeert Uytterhoeven break; 1433f477b7fbSwangyuhang } 1434a822e99cSMark Brown } 1435f477b7fbSwangyuhang 1436d57a4282SGrant Likely /* Device speed */ 143789da4293STrent Piepho rc = of_property_read_u32(nc, "spi-max-frequency", &value); 143889da4293STrent Piepho if (rc) { 143989da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 144089da4293STrent Piepho nc->full_name, rc); 1441aff5e3f8SPantelis Antoniou goto err_out; 1442d57a4282SGrant Likely } 144389da4293STrent Piepho spi->max_speed_hz = value; 1444d57a4282SGrant Likely 1445d57a4282SGrant Likely /* Store a pointer to the node in the device structure */ 1446d57a4282SGrant Likely of_node_get(nc); 1447d57a4282SGrant Likely spi->dev.of_node = nc; 1448d57a4282SGrant Likely 1449d57a4282SGrant Likely /* Register the new device */ 1450d57a4282SGrant Likely rc = spi_add_device(spi); 1451d57a4282SGrant Likely if (rc) { 1452d57a4282SGrant Likely dev_err(&master->dev, "spi_device register error %s\n", 1453d57a4282SGrant Likely nc->full_name); 1454aff5e3f8SPantelis Antoniou goto err_out; 1455d57a4282SGrant Likely } 1456d57a4282SGrant Likely 1457aff5e3f8SPantelis Antoniou return spi; 1458aff5e3f8SPantelis Antoniou 1459aff5e3f8SPantelis Antoniou err_out: 1460aff5e3f8SPantelis Antoniou spi_dev_put(spi); 1461aff5e3f8SPantelis Antoniou return ERR_PTR(rc); 1462aff5e3f8SPantelis Antoniou } 1463aff5e3f8SPantelis Antoniou 1464aff5e3f8SPantelis Antoniou /** 1465aff5e3f8SPantelis Antoniou * of_register_spi_devices() - Register child devices onto the SPI bus 1466aff5e3f8SPantelis Antoniou * @master: Pointer to spi_master device 1467aff5e3f8SPantelis Antoniou * 1468aff5e3f8SPantelis Antoniou * Registers an spi_device for each child node of master node which has a 'reg' 1469aff5e3f8SPantelis Antoniou * property. 
1470aff5e3f8SPantelis Antoniou */ 1471aff5e3f8SPantelis Antoniou static void of_register_spi_devices(struct spi_master *master) 1472aff5e3f8SPantelis Antoniou { 1473aff5e3f8SPantelis Antoniou struct spi_device *spi; 1474aff5e3f8SPantelis Antoniou struct device_node *nc; 1475aff5e3f8SPantelis Antoniou 1476aff5e3f8SPantelis Antoniou if (!master->dev.of_node) 1477aff5e3f8SPantelis Antoniou return; 1478aff5e3f8SPantelis Antoniou 1479aff5e3f8SPantelis Antoniou for_each_available_child_of_node(master->dev.of_node, nc) { 1480aff5e3f8SPantelis Antoniou spi = of_register_spi_device(master, nc); 1481aff5e3f8SPantelis Antoniou if (IS_ERR(spi)) 1482aff5e3f8SPantelis Antoniou dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1483aff5e3f8SPantelis Antoniou nc->full_name); 1484d57a4282SGrant Likely } 1485d57a4282SGrant Likely } 1486d57a4282SGrant Likely #else 1487d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) { } 1488d57a4282SGrant Likely #endif 1489d57a4282SGrant Likely 149064bee4d2SMika Westerberg #ifdef CONFIG_ACPI 149164bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 149264bee4d2SMika Westerberg { 149364bee4d2SMika Westerberg struct spi_device *spi = data; 149464bee4d2SMika Westerberg 149564bee4d2SMika Westerberg if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 149664bee4d2SMika Westerberg struct acpi_resource_spi_serialbus *sb; 149764bee4d2SMika Westerberg 149864bee4d2SMika Westerberg sb = &ares->data.spi_serial_bus; 149964bee4d2SMika Westerberg if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 150064bee4d2SMika Westerberg spi->chip_select = sb->device_selection; 150164bee4d2SMika Westerberg spi->max_speed_hz = sb->connection_speed; 150264bee4d2SMika Westerberg 150364bee4d2SMika Westerberg if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 150464bee4d2SMika Westerberg spi->mode |= SPI_CPHA; 150564bee4d2SMika Westerberg if (sb->clock_polarity == ACPI_SPI_START_HIGH) 
150664bee4d2SMika Westerberg spi->mode |= SPI_CPOL; 150764bee4d2SMika Westerberg if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 150864bee4d2SMika Westerberg spi->mode |= SPI_CS_HIGH; 150964bee4d2SMika Westerberg } 151064bee4d2SMika Westerberg } else if (spi->irq < 0) { 151164bee4d2SMika Westerberg struct resource r; 151264bee4d2SMika Westerberg 151364bee4d2SMika Westerberg if (acpi_dev_resource_interrupt(ares, 0, &r)) 151464bee4d2SMika Westerberg spi->irq = r.start; 151564bee4d2SMika Westerberg } 151664bee4d2SMika Westerberg 151764bee4d2SMika Westerberg /* Always tell the ACPI core to skip this resource */ 151864bee4d2SMika Westerberg return 1; 151964bee4d2SMika Westerberg } 152064bee4d2SMika Westerberg 152164bee4d2SMika Westerberg static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 152264bee4d2SMika Westerberg void *data, void **return_value) 152364bee4d2SMika Westerberg { 152464bee4d2SMika Westerberg struct spi_master *master = data; 152564bee4d2SMika Westerberg struct list_head resource_list; 152664bee4d2SMika Westerberg struct acpi_device *adev; 152764bee4d2SMika Westerberg struct spi_device *spi; 152864bee4d2SMika Westerberg int ret; 152964bee4d2SMika Westerberg 153064bee4d2SMika Westerberg if (acpi_bus_get_device(handle, &adev)) 153164bee4d2SMika Westerberg return AE_OK; 153264bee4d2SMika Westerberg if (acpi_bus_get_status(adev) || !adev->status.present) 153364bee4d2SMika Westerberg return AE_OK; 153464bee4d2SMika Westerberg 153564bee4d2SMika Westerberg spi = spi_alloc_device(master); 153664bee4d2SMika Westerberg if (!spi) { 153764bee4d2SMika Westerberg dev_err(&master->dev, "failed to allocate SPI device for %s\n", 153864bee4d2SMika Westerberg dev_name(&adev->dev)); 153964bee4d2SMika Westerberg return AE_NO_MEMORY; 154064bee4d2SMika Westerberg } 154164bee4d2SMika Westerberg 15427b199811SRafael J. 
Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 154364bee4d2SMika Westerberg spi->irq = -1; 154464bee4d2SMika Westerberg 154564bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 154664bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 154764bee4d2SMika Westerberg acpi_spi_add_resource, spi); 154864bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 154964bee4d2SMika Westerberg 155064bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 155164bee4d2SMika Westerberg spi_dev_put(spi); 155264bee4d2SMika Westerberg return AE_OK; 155364bee4d2SMika Westerberg } 155464bee4d2SMika Westerberg 155533cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 1556cf9eb39cSJarkko Nikula strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 155764bee4d2SMika Westerberg if (spi_add_device(spi)) { 155833cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 155964bee4d2SMika Westerberg dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 156064bee4d2SMika Westerberg dev_name(&adev->dev)); 156164bee4d2SMika Westerberg spi_dev_put(spi); 156264bee4d2SMika Westerberg } 156364bee4d2SMika Westerberg 156464bee4d2SMika Westerberg return AE_OK; 156564bee4d2SMika Westerberg } 156664bee4d2SMika Westerberg 156764bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master) 156864bee4d2SMika Westerberg { 156964bee4d2SMika Westerberg acpi_status status; 157064bee4d2SMika Westerberg acpi_handle handle; 157164bee4d2SMika Westerberg 157229896178SRafael J. 
Wysocki handle = ACPI_HANDLE(master->dev.parent); 157364bee4d2SMika Westerberg if (!handle) 157464bee4d2SMika Westerberg return; 157564bee4d2SMika Westerberg 157664bee4d2SMika Westerberg status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 157764bee4d2SMika Westerberg acpi_spi_add_device, NULL, 157864bee4d2SMika Westerberg master, NULL); 157964bee4d2SMika Westerberg if (ACPI_FAILURE(status)) 158064bee4d2SMika Westerberg dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 158164bee4d2SMika Westerberg } 158264bee4d2SMika Westerberg #else 158364bee4d2SMika Westerberg static inline void acpi_register_spi_devices(struct spi_master *master) {} 158464bee4d2SMika Westerberg #endif /* CONFIG_ACPI */ 158564bee4d2SMika Westerberg 158649dce689STony Jones static void spi_master_release(struct device *dev) 15878ae12a0dSDavid Brownell { 15888ae12a0dSDavid Brownell struct spi_master *master; 15898ae12a0dSDavid Brownell 159049dce689STony Jones master = container_of(dev, struct spi_master, dev); 15918ae12a0dSDavid Brownell kfree(master); 15928ae12a0dSDavid Brownell } 15938ae12a0dSDavid Brownell 15948ae12a0dSDavid Brownell static struct class spi_master_class = { 15958ae12a0dSDavid Brownell .name = "spi_master", 15968ae12a0dSDavid Brownell .owner = THIS_MODULE, 159749dce689STony Jones .dev_release = spi_master_release, 1598eca2ebc7SMartin Sperl .dev_groups = spi_master_groups, 15998ae12a0dSDavid Brownell }; 16008ae12a0dSDavid Brownell 16018ae12a0dSDavid Brownell 16028ae12a0dSDavid Brownell /** 16038ae12a0dSDavid Brownell * spi_alloc_master - allocate SPI master controller 16048ae12a0dSDavid Brownell * @dev: the controller, possibly using the platform_bus 160533e34dc6SDavid Brownell * @size: how much zeroed driver-private data to allocate; the pointer to this 160649dce689STony Jones * memory is in the driver_data field of the returned device, 16070c868461SDavid Brownell * accessible with spi_master_get_devdata(). 
160833e34dc6SDavid Brownell * Context: can sleep 16098ae12a0dSDavid Brownell * 16108ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 16118ae12a0dSDavid Brownell * only ones directly touching chip registers. It's how they allocate 1612ba1a0513Sdmitry pervushin * an spi_master structure, prior to calling spi_register_master(). 16138ae12a0dSDavid Brownell * 16148ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns the SPI 16158ae12a0dSDavid Brownell * master structure on success, else NULL. 16168ae12a0dSDavid Brownell * 16178ae12a0dSDavid Brownell * The caller is responsible for assigning the bus number and initializing 1618ba1a0513Sdmitry pervushin * the master's methods before calling spi_register_master(); and (after errors 1619eb4af0f5SUwe Kleine-König * adding the device) calling spi_master_put() and kfree() to prevent a memory 1620eb4af0f5SUwe Kleine-König * leak. 16218ae12a0dSDavid Brownell */ 1622e9d5a461SAdrian Bunk struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 16238ae12a0dSDavid Brownell { 16248ae12a0dSDavid Brownell struct spi_master *master; 16258ae12a0dSDavid Brownell 16260c868461SDavid Brownell if (!dev) 16270c868461SDavid Brownell return NULL; 16280c868461SDavid Brownell 16295fe5f05eSJingoo Han master = kzalloc(size + sizeof(*master), GFP_KERNEL); 16308ae12a0dSDavid Brownell if (!master) 16318ae12a0dSDavid Brownell return NULL; 16328ae12a0dSDavid Brownell 163349dce689STony Jones device_initialize(&master->dev); 16341e8a52e1SGrant Likely master->bus_num = -1; 16351e8a52e1SGrant Likely master->num_chipselect = 1; 163649dce689STony Jones master->dev.class = &spi_master_class; 163749dce689STony Jones master->dev.parent = get_device(dev); 16380c868461SDavid Brownell spi_master_set_devdata(master, &master[1]); 16398ae12a0dSDavid Brownell 16408ae12a0dSDavid Brownell return master; 16418ae12a0dSDavid Brownell } 16428ae12a0dSDavid Brownell 
EXPORT_SYMBOL_GPL(spi_alloc_master); 16438ae12a0dSDavid Brownell 164474317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 164574317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 164674317984SJean-Christophe PLAGNIOL-VILLARD { 1647e80beb27SGrant Likely int nb, i, *cs; 164874317984SJean-Christophe PLAGNIOL-VILLARD struct device_node *np = master->dev.of_node; 164974317984SJean-Christophe PLAGNIOL-VILLARD 165074317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 165174317984SJean-Christophe PLAGNIOL-VILLARD return 0; 165274317984SJean-Christophe PLAGNIOL-VILLARD 165374317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 16545fe5f05eSJingoo Han master->num_chipselect = max_t(int, nb, master->num_chipselect); 165574317984SJean-Christophe PLAGNIOL-VILLARD 16568ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 16578ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 165874317984SJean-Christophe PLAGNIOL-VILLARD return 0; 16598ec5d84eSAndreas Larsson else if (nb < 0) 16608ec5d84eSAndreas Larsson return nb; 166174317984SJean-Christophe PLAGNIOL-VILLARD 166274317984SJean-Christophe PLAGNIOL-VILLARD cs = devm_kzalloc(&master->dev, 166374317984SJean-Christophe PLAGNIOL-VILLARD sizeof(int) * master->num_chipselect, 166474317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 166574317984SJean-Christophe PLAGNIOL-VILLARD master->cs_gpios = cs; 166674317984SJean-Christophe PLAGNIOL-VILLARD 166774317984SJean-Christophe PLAGNIOL-VILLARD if (!master->cs_gpios) 166874317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 166974317984SJean-Christophe PLAGNIOL-VILLARD 16700da83bb1SAndreas Larsson for (i = 0; i < master->num_chipselect; i++) 1671446411e1SAndreas Larsson cs[i] = -ENOENT; 167274317984SJean-Christophe PLAGNIOL-VILLARD 167374317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 167474317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = 
of_get_named_gpio(np, "cs-gpios", i); 167574317984SJean-Christophe PLAGNIOL-VILLARD 167674317984SJean-Christophe PLAGNIOL-VILLARD return 0; 167774317984SJean-Christophe PLAGNIOL-VILLARD } 167874317984SJean-Christophe PLAGNIOL-VILLARD #else 167974317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 168074317984SJean-Christophe PLAGNIOL-VILLARD { 168174317984SJean-Christophe PLAGNIOL-VILLARD return 0; 168274317984SJean-Christophe PLAGNIOL-VILLARD } 168374317984SJean-Christophe PLAGNIOL-VILLARD #endif 168474317984SJean-Christophe PLAGNIOL-VILLARD 16858ae12a0dSDavid Brownell /** 16868ae12a0dSDavid Brownell * spi_register_master - register SPI master controller 16878ae12a0dSDavid Brownell * @master: initialized master, originally from spi_alloc_master() 168833e34dc6SDavid Brownell * Context: can sleep 16898ae12a0dSDavid Brownell * 16908ae12a0dSDavid Brownell * SPI master controllers connect to their drivers using some non-SPI bus, 16918ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 16928ae12a0dSDavid Brownell * includes calling spi_register_master() to hook up to this SPI bus glue. 16938ae12a0dSDavid Brownell * 16948ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 16958ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 16968ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 16978ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 16988ae12a0dSDavid Brownell * chip is at which address. 16998ae12a0dSDavid Brownell * 17008ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 17018ae12a0dSDavid Brownell * success, else a negative error code (dropping the master's refcount). 
17020c868461SDavid Brownell * After a successful return, the caller is responsible for calling 17030c868461SDavid Brownell * spi_unregister_master(). 17048ae12a0dSDavid Brownell */ 1705e9d5a461SAdrian Bunk int spi_register_master(struct spi_master *master) 17068ae12a0dSDavid Brownell { 1707e44a45aeSDavid Brownell static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 170849dce689STony Jones struct device *dev = master->dev.parent; 17092b9603a0SFeng Tang struct boardinfo *bi; 17108ae12a0dSDavid Brownell int status = -ENODEV; 17118ae12a0dSDavid Brownell int dynamic = 0; 17128ae12a0dSDavid Brownell 17130c868461SDavid Brownell if (!dev) 17140c868461SDavid Brownell return -ENODEV; 17150c868461SDavid Brownell 171674317984SJean-Christophe PLAGNIOL-VILLARD status = of_spi_register_master(master); 171774317984SJean-Christophe PLAGNIOL-VILLARD if (status) 171874317984SJean-Christophe PLAGNIOL-VILLARD return status; 171974317984SJean-Christophe PLAGNIOL-VILLARD 1720082c8cb4SDavid Brownell /* even if it's just one always-selected device, there must 1721082c8cb4SDavid Brownell * be at least one chipselect 1722082c8cb4SDavid Brownell */ 1723082c8cb4SDavid Brownell if (master->num_chipselect == 0) 1724082c8cb4SDavid Brownell return -EINVAL; 1725082c8cb4SDavid Brownell 1726bb29785eSGrant Likely if ((master->bus_num < 0) && master->dev.of_node) 1727bb29785eSGrant Likely master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1728bb29785eSGrant Likely 17298ae12a0dSDavid Brownell /* convention: dynamically assigned bus IDs count down from the max */ 1730a020ed75SDavid Brownell if (master->bus_num < 0) { 1731082c8cb4SDavid Brownell /* FIXME switch to an IDR based scheme, something like 1732082c8cb4SDavid Brownell * I2C now uses, so we can't run out of "dynamic" IDs 1733082c8cb4SDavid Brownell */ 17348ae12a0dSDavid Brownell master->bus_num = atomic_dec_return(&dyn_bus_id); 1735b885244eSDavid Brownell dynamic = 1; 17368ae12a0dSDavid Brownell } 17378ae12a0dSDavid Brownell 
17385424d43eSMark Brown INIT_LIST_HEAD(&master->queue); 17395424d43eSMark Brown spin_lock_init(&master->queue_lock); 1740cf32b71eSErnst Schwab spin_lock_init(&master->bus_lock_spinlock); 1741cf32b71eSErnst Schwab mutex_init(&master->bus_lock_mutex); 1742cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1743b158935fSMark Brown init_completion(&master->xfer_completion); 17446ad45a27SMark Brown if (!master->max_dma_len) 17456ad45a27SMark Brown master->max_dma_len = INT_MAX; 1746cf32b71eSErnst Schwab 17478ae12a0dSDavid Brownell /* register the device, then userspace will see it. 17488ae12a0dSDavid Brownell * registration fails if the bus ID is in use. 17498ae12a0dSDavid Brownell */ 175035f74fcaSKay Sievers dev_set_name(&master->dev, "spi%u", master->bus_num); 175149dce689STony Jones status = device_add(&master->dev); 1752b885244eSDavid Brownell if (status < 0) 17538ae12a0dSDavid Brownell goto done; 175435f74fcaSKay Sievers dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 17558ae12a0dSDavid Brownell dynamic ? 
" (dynamic)" : ""); 17568ae12a0dSDavid Brownell 1757ffbbdd21SLinus Walleij /* If we're using a queued driver, start the queue */ 1758ffbbdd21SLinus Walleij if (master->transfer) 1759ffbbdd21SLinus Walleij dev_info(dev, "master is unqueued, this is deprecated\n"); 1760ffbbdd21SLinus Walleij else { 1761ffbbdd21SLinus Walleij status = spi_master_initialize_queue(master); 1762ffbbdd21SLinus Walleij if (status) { 1763e93b0724SAxel Lin device_del(&master->dev); 1764ffbbdd21SLinus Walleij goto done; 1765ffbbdd21SLinus Walleij } 1766ffbbdd21SLinus Walleij } 1767eca2ebc7SMartin Sperl /* add statistics */ 1768eca2ebc7SMartin Sperl spin_lock_init(&master->statistics.lock); 1769ffbbdd21SLinus Walleij 17702b9603a0SFeng Tang mutex_lock(&board_lock); 17712b9603a0SFeng Tang list_add_tail(&master->list, &spi_master_list); 17722b9603a0SFeng Tang list_for_each_entry(bi, &board_list, list) 17732b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 17742b9603a0SFeng Tang mutex_unlock(&board_lock); 17752b9603a0SFeng Tang 177664bee4d2SMika Westerberg /* Register devices from the device tree and ACPI */ 177712b15e83SAnatolij Gustschin of_register_spi_devices(master); 177864bee4d2SMika Westerberg acpi_register_spi_devices(master); 17798ae12a0dSDavid Brownell done: 17808ae12a0dSDavid Brownell return status; 17818ae12a0dSDavid Brownell } 17828ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_master); 17838ae12a0dSDavid Brownell 1784666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res) 1785666d5b4cSMark Brown { 1786666d5b4cSMark Brown spi_unregister_master(*(struct spi_master **)res); 1787666d5b4cSMark Brown } 1788666d5b4cSMark Brown 1789666d5b4cSMark Brown /** 1790666d5b4cSMark Brown * dev_spi_register_master - register managed SPI master controller 1791666d5b4cSMark Brown * @dev: device managing SPI master 1792666d5b4cSMark Brown * @master: initialized master, originally from spi_alloc_master() 1793666d5b4cSMark Brown * Context: can sleep 
1794666d5b4cSMark Brown * 1795666d5b4cSMark Brown * Register a SPI device as with spi_register_master() which will 1796666d5b4cSMark Brown * automatically be unregister 1797666d5b4cSMark Brown */ 1798666d5b4cSMark Brown int devm_spi_register_master(struct device *dev, struct spi_master *master) 1799666d5b4cSMark Brown { 1800666d5b4cSMark Brown struct spi_master **ptr; 1801666d5b4cSMark Brown int ret; 1802666d5b4cSMark Brown 1803666d5b4cSMark Brown ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1804666d5b4cSMark Brown if (!ptr) 1805666d5b4cSMark Brown return -ENOMEM; 1806666d5b4cSMark Brown 1807666d5b4cSMark Brown ret = spi_register_master(master); 18084b92894eSStephen Warren if (!ret) { 1809666d5b4cSMark Brown *ptr = master; 1810666d5b4cSMark Brown devres_add(dev, ptr); 1811666d5b4cSMark Brown } else { 1812666d5b4cSMark Brown devres_free(ptr); 1813666d5b4cSMark Brown } 1814666d5b4cSMark Brown 1815666d5b4cSMark Brown return ret; 1816666d5b4cSMark Brown } 1817666d5b4cSMark Brown EXPORT_SYMBOL_GPL(devm_spi_register_master); 1818666d5b4cSMark Brown 181934860089SDavid Lamparter static int __unregister(struct device *dev, void *null) 18208ae12a0dSDavid Brownell { 18210c868461SDavid Brownell spi_unregister_device(to_spi_device(dev)); 18228ae12a0dSDavid Brownell return 0; 18238ae12a0dSDavid Brownell } 18248ae12a0dSDavid Brownell 18258ae12a0dSDavid Brownell /** 18268ae12a0dSDavid Brownell * spi_unregister_master - unregister SPI master controller 18278ae12a0dSDavid Brownell * @master: the master being unregistered 182833e34dc6SDavid Brownell * Context: can sleep 18298ae12a0dSDavid Brownell * 18308ae12a0dSDavid Brownell * This call is used only by SPI master controller drivers, which are the 18318ae12a0dSDavid Brownell * only ones directly touching chip registers. 18328ae12a0dSDavid Brownell * 18338ae12a0dSDavid Brownell * This must be called from context that can sleep. 
18348ae12a0dSDavid Brownell */ 18358ae12a0dSDavid Brownell void spi_unregister_master(struct spi_master *master) 18368ae12a0dSDavid Brownell { 183789fc9a1aSJeff Garzik int dummy; 183889fc9a1aSJeff Garzik 1839ffbbdd21SLinus Walleij if (master->queued) { 1840ffbbdd21SLinus Walleij if (spi_destroy_queue(master)) 1841ffbbdd21SLinus Walleij dev_err(&master->dev, "queue remove failed\n"); 1842ffbbdd21SLinus Walleij } 1843ffbbdd21SLinus Walleij 18442b9603a0SFeng Tang mutex_lock(&board_lock); 18452b9603a0SFeng Tang list_del(&master->list); 18462b9603a0SFeng Tang mutex_unlock(&board_lock); 18472b9603a0SFeng Tang 184897dbf37dSSebastian Andrzej Siewior dummy = device_for_each_child(&master->dev, NULL, __unregister); 184949dce689STony Jones device_unregister(&master->dev); 18508ae12a0dSDavid Brownell } 18518ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_unregister_master); 18528ae12a0dSDavid Brownell 1853ffbbdd21SLinus Walleij int spi_master_suspend(struct spi_master *master) 1854ffbbdd21SLinus Walleij { 1855ffbbdd21SLinus Walleij int ret; 1856ffbbdd21SLinus Walleij 1857ffbbdd21SLinus Walleij /* Basically no-ops for non-queued masters */ 1858ffbbdd21SLinus Walleij if (!master->queued) 1859ffbbdd21SLinus Walleij return 0; 1860ffbbdd21SLinus Walleij 1861ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 1862ffbbdd21SLinus Walleij if (ret) 1863ffbbdd21SLinus Walleij dev_err(&master->dev, "queue stop failed\n"); 1864ffbbdd21SLinus Walleij 1865ffbbdd21SLinus Walleij return ret; 1866ffbbdd21SLinus Walleij } 1867ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_suspend); 1868ffbbdd21SLinus Walleij 1869ffbbdd21SLinus Walleij int spi_master_resume(struct spi_master *master) 1870ffbbdd21SLinus Walleij { 1871ffbbdd21SLinus Walleij int ret; 1872ffbbdd21SLinus Walleij 1873ffbbdd21SLinus Walleij if (!master->queued) 1874ffbbdd21SLinus Walleij return 0; 1875ffbbdd21SLinus Walleij 1876ffbbdd21SLinus Walleij ret = spi_start_queue(master); 1877ffbbdd21SLinus Walleij if (ret) 
1878ffbbdd21SLinus Walleij dev_err(&master->dev, "queue restart failed\n"); 1879ffbbdd21SLinus Walleij 1880ffbbdd21SLinus Walleij return ret; 1881ffbbdd21SLinus Walleij } 1882ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_resume); 1883ffbbdd21SLinus Walleij 18849f3b795aSMichał Mirosław static int __spi_master_match(struct device *dev, const void *data) 18855ed2c832SDave Young { 18865ed2c832SDave Young struct spi_master *m; 18879f3b795aSMichał Mirosław const u16 *bus_num = data; 18885ed2c832SDave Young 18895ed2c832SDave Young m = container_of(dev, struct spi_master, dev); 18905ed2c832SDave Young return m->bus_num == *bus_num; 18915ed2c832SDave Young } 18925ed2c832SDave Young 18938ae12a0dSDavid Brownell /** 18948ae12a0dSDavid Brownell * spi_busnum_to_master - look up master associated with bus_num 18958ae12a0dSDavid Brownell * @bus_num: the master's bus number 189633e34dc6SDavid Brownell * Context: can sleep 18978ae12a0dSDavid Brownell * 18988ae12a0dSDavid Brownell * This call may be used with devices that are registered after 18998ae12a0dSDavid Brownell * arch init time. It returns a refcounted pointer to the relevant 19008ae12a0dSDavid Brownell * spi_master (which the caller must release), or NULL if there is 19018ae12a0dSDavid Brownell * no such master registered. 
19028ae12a0dSDavid Brownell */ 19038ae12a0dSDavid Brownell struct spi_master *spi_busnum_to_master(u16 bus_num) 19048ae12a0dSDavid Brownell { 190549dce689STony Jones struct device *dev; 19061e9a51dcSAtsushi Nemoto struct spi_master *master = NULL; 19078ae12a0dSDavid Brownell 1908695794aeSGreg Kroah-Hartman dev = class_find_device(&spi_master_class, NULL, &bus_num, 19095ed2c832SDave Young __spi_master_match); 19105ed2c832SDave Young if (dev) 19115ed2c832SDave Young master = container_of(dev, struct spi_master, dev); 19125ed2c832SDave Young /* reference got in class_find_device */ 19131e9a51dcSAtsushi Nemoto return master; 19148ae12a0dSDavid Brownell } 19158ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master); 19168ae12a0dSDavid Brownell 19178ae12a0dSDavid Brownell 19188ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 19198ae12a0dSDavid Brownell 19207d077197SDavid Brownell /* Core methods for SPI master protocol drivers. Some of the 19217d077197SDavid Brownell * other core methods are currently defined as inline functions. 
19227d077197SDavid Brownell */ 19237d077197SDavid Brownell 192463ab645fSStefan Brüns static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 192563ab645fSStefan Brüns { 192663ab645fSStefan Brüns if (master->bits_per_word_mask) { 192763ab645fSStefan Brüns /* Only 32 bits fit in the mask */ 192863ab645fSStefan Brüns if (bits_per_word > 32) 192963ab645fSStefan Brüns return -EINVAL; 193063ab645fSStefan Brüns if (!(master->bits_per_word_mask & 193163ab645fSStefan Brüns SPI_BPW_MASK(bits_per_word))) 193263ab645fSStefan Brüns return -EINVAL; 193363ab645fSStefan Brüns } 193463ab645fSStefan Brüns 193563ab645fSStefan Brüns return 0; 193663ab645fSStefan Brüns } 193763ab645fSStefan Brüns 19387d077197SDavid Brownell /** 19397d077197SDavid Brownell * spi_setup - setup SPI mode and clock rate 19407d077197SDavid Brownell * @spi: the device whose settings are being modified 19417d077197SDavid Brownell * Context: can sleep, and no requests are queued to the device 19427d077197SDavid Brownell * 19437d077197SDavid Brownell * SPI protocol drivers may need to update the transfer mode if the 19447d077197SDavid Brownell * device doesn't work with its default. They may likewise need 19457d077197SDavid Brownell * to update clock rates or word sizes from initial values. This function 19467d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep. 19477d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 19487d077197SDavid Brownell * effect the next time the device is selected and data is transferred to 19497d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected. 19507d077197SDavid Brownell * 19517d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option 19527d077197SDavid Brownell * that the underlying controller or its driver does not support. 
For 19537d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words, 19547d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects. 19557d077197SDavid Brownell */ 19567d077197SDavid Brownell int spi_setup(struct spi_device *spi) 19577d077197SDavid Brownell { 195883596fbeSGeert Uytterhoeven unsigned bad_bits, ugly_bits; 19595ab8d262SAndy Shevchenko int status; 19607d077197SDavid Brownell 1961f477b7fbSwangyuhang /* check mode to prevent that DUAL and QUAD set at the same time 1962f477b7fbSwangyuhang */ 1963f477b7fbSwangyuhang if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 1964f477b7fbSwangyuhang ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 1965f477b7fbSwangyuhang dev_err(&spi->dev, 1966f477b7fbSwangyuhang "setup: can not select dual and quad at the same time\n"); 1967f477b7fbSwangyuhang return -EINVAL; 1968f477b7fbSwangyuhang } 1969f477b7fbSwangyuhang /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 1970f477b7fbSwangyuhang */ 1971f477b7fbSwangyuhang if ((spi->mode & SPI_3WIRE) && (spi->mode & 1972f477b7fbSwangyuhang (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 1973f477b7fbSwangyuhang return -EINVAL; 1974e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options 1975e7db06b5SDavid Brownell * that aren't supported with their current master 1976e7db06b5SDavid Brownell */ 1977e7db06b5SDavid Brownell bad_bits = spi->mode & ~spi->master->mode_bits; 197883596fbeSGeert Uytterhoeven ugly_bits = bad_bits & 197983596fbeSGeert Uytterhoeven (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 198083596fbeSGeert Uytterhoeven if (ugly_bits) { 198183596fbeSGeert Uytterhoeven dev_warn(&spi->dev, 198283596fbeSGeert Uytterhoeven "setup: ignoring unsupported mode bits %x\n", 198383596fbeSGeert Uytterhoeven ugly_bits); 198483596fbeSGeert Uytterhoeven spi->mode &= ~ugly_bits; 198583596fbeSGeert Uytterhoeven bad_bits &= ~ugly_bits; 198683596fbeSGeert 
Uytterhoeven } 1987e7db06b5SDavid Brownell if (bad_bits) { 1988eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1989e7db06b5SDavid Brownell bad_bits); 1990e7db06b5SDavid Brownell return -EINVAL; 1991e7db06b5SDavid Brownell } 1992e7db06b5SDavid Brownell 19937d077197SDavid Brownell if (!spi->bits_per_word) 19947d077197SDavid Brownell spi->bits_per_word = 8; 19957d077197SDavid Brownell 19965ab8d262SAndy Shevchenko status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 19975ab8d262SAndy Shevchenko if (status) 19985ab8d262SAndy Shevchenko return status; 199963ab645fSStefan Brüns 2000052eb2d4SAxel Lin if (!spi->max_speed_hz) 2001052eb2d4SAxel Lin spi->max_speed_hz = spi->master->max_speed_hz; 2002052eb2d4SAxel Lin 20031a7b7ee7SIvan T. Ivanov spi_set_cs(spi, false); 20041a7b7ee7SIvan T. Ivanov 2005caae070cSLaxman Dewangan if (spi->master->setup) 20067d077197SDavid Brownell status = spi->master->setup(spi); 20077d077197SDavid Brownell 20085fe5f05eSJingoo Han dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 20097d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 20107d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 20117d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 20127d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 20137d077197SDavid Brownell (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 20147d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 20157d077197SDavid Brownell status); 20167d077197SDavid Brownell 20177d077197SDavid Brownell return status; 20187d077197SDavid Brownell } 20197d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 20207d077197SDavid Brownell 202190808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2022cf32b71eSErnst Schwab { 2023cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2024e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 20256ea31293SAtsushi Nemoto int w_size; 2026cf32b71eSErnst Schwab 202724a0013aSMark Brown if (list_empty(&message->transfers)) 202824a0013aSMark Brown return -EINVAL; 202924a0013aSMark Brown 2030cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 2031cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 2032cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 2033cf32b71eSErnst Schwab * software limitations. 
2034cf32b71eSErnst Schwab */ 2035cf32b71eSErnst Schwab if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2036cf32b71eSErnst Schwab || (spi->mode & SPI_3WIRE)) { 2037cf32b71eSErnst Schwab unsigned flags = master->flags; 2038cf32b71eSErnst Schwab 2039cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 2040cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 2041cf32b71eSErnst Schwab return -EINVAL; 2042cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2043cf32b71eSErnst Schwab return -EINVAL; 2044cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2045cf32b71eSErnst Schwab return -EINVAL; 2046cf32b71eSErnst Schwab } 2047cf32b71eSErnst Schwab } 2048cf32b71eSErnst Schwab 2049e6811d1dSLaxman Dewangan /** 2050059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 2051059b8ffeSLaxman Dewangan * it is not set for this transfer. 2052f477b7fbSwangyuhang * Set transfer tx_nbits and rx_nbits as single transfer default 2053f477b7fbSwangyuhang * (SPI_NBITS_SINGLE) if it is not set for this transfer. 
2054e6811d1dSLaxman Dewangan */ 2055e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 2056078726ceSSourav Poddar message->frame_length += xfer->len; 2057e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 2058e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 2059a6f87fadSAxel Lin 2060a6f87fadSAxel Lin if (!xfer->speed_hz) 2061059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 20627dc9fbc3SMark Brown if (!xfer->speed_hz) 20637dc9fbc3SMark Brown xfer->speed_hz = master->max_speed_hz; 2064a6f87fadSAxel Lin 206556ede94aSGabor Juhos if (master->max_speed_hz && 206656ede94aSGabor Juhos xfer->speed_hz > master->max_speed_hz) 206756ede94aSGabor Juhos xfer->speed_hz = master->max_speed_hz; 206856ede94aSGabor Juhos 206963ab645fSStefan Brüns if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2070543bb255SStephen Warren return -EINVAL; 2071a2fd4f9fSMark Brown 20724d94bd21SIvan T. Ivanov /* 20734d94bd21SIvan T. Ivanov * SPI transfer length should be multiple of SPI word size 20744d94bd21SIvan T. Ivanov * where SPI word size should be power-of-two multiple 20754d94bd21SIvan T. Ivanov */ 20764d94bd21SIvan T. Ivanov if (xfer->bits_per_word <= 8) 20774d94bd21SIvan T. Ivanov w_size = 1; 20784d94bd21SIvan T. Ivanov else if (xfer->bits_per_word <= 16) 20794d94bd21SIvan T. Ivanov w_size = 2; 20804d94bd21SIvan T. Ivanov else 20814d94bd21SIvan T. Ivanov w_size = 4; 20824d94bd21SIvan T. Ivanov 20834d94bd21SIvan T. Ivanov /* No partial transfers accepted */ 20846ea31293SAtsushi Nemoto if (xfer->len % w_size) 20854d94bd21SIvan T. Ivanov return -EINVAL; 20864d94bd21SIvan T. 
Ivanov 2087a2fd4f9fSMark Brown if (xfer->speed_hz && master->min_speed_hz && 2088a2fd4f9fSMark Brown xfer->speed_hz < master->min_speed_hz) 2089a2fd4f9fSMark Brown return -EINVAL; 2090f477b7fbSwangyuhang 2091f477b7fbSwangyuhang if (xfer->tx_buf && !xfer->tx_nbits) 2092f477b7fbSwangyuhang xfer->tx_nbits = SPI_NBITS_SINGLE; 2093f477b7fbSwangyuhang if (xfer->rx_buf && !xfer->rx_nbits) 2094f477b7fbSwangyuhang xfer->rx_nbits = SPI_NBITS_SINGLE; 2095f477b7fbSwangyuhang /* check transfer tx/rx_nbits: 20961afd9989SGeert Uytterhoeven * 1. check the value matches one of single, dual and quad 20971afd9989SGeert Uytterhoeven * 2. check tx/rx_nbits match the mode in spi_device 2098f477b7fbSwangyuhang */ 2099db90a441SSourav Poddar if (xfer->tx_buf) { 2100f477b7fbSwangyuhang if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2101f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_DUAL && 2102f477b7fbSwangyuhang xfer->tx_nbits != SPI_NBITS_QUAD) 2103a2fd4f9fSMark Brown return -EINVAL; 2104f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2105f477b7fbSwangyuhang !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2106f477b7fbSwangyuhang return -EINVAL; 2107f477b7fbSwangyuhang if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2108f477b7fbSwangyuhang !(spi->mode & SPI_TX_QUAD)) 2109f477b7fbSwangyuhang return -EINVAL; 2110db90a441SSourav Poddar } 2111f477b7fbSwangyuhang /* check transfer rx_nbits */ 2112db90a441SSourav Poddar if (xfer->rx_buf) { 2113f477b7fbSwangyuhang if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2114f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_DUAL && 2115f477b7fbSwangyuhang xfer->rx_nbits != SPI_NBITS_QUAD) 2116f477b7fbSwangyuhang return -EINVAL; 2117f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2118f477b7fbSwangyuhang !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2119f477b7fbSwangyuhang return -EINVAL; 2120f477b7fbSwangyuhang if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2121f477b7fbSwangyuhang !(spi->mode & SPI_RX_QUAD)) 2122f477b7fbSwangyuhang return -EINVAL; 
2123e6811d1dSLaxman Dewangan } 2124e6811d1dSLaxman Dewangan } 2125e6811d1dSLaxman Dewangan 2126cf32b71eSErnst Schwab message->status = -EINPROGRESS; 212790808738SMark Brown 212890808738SMark Brown return 0; 212990808738SMark Brown } 213090808738SMark Brown 213190808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message) 213290808738SMark Brown { 213390808738SMark Brown struct spi_master *master = spi->master; 213490808738SMark Brown 213590808738SMark Brown message->spi = spi; 213690808738SMark Brown 2137eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2138eca2ebc7SMartin Sperl SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2139eca2ebc7SMartin Sperl 214090808738SMark Brown trace_spi_message_submit(message); 214190808738SMark Brown 2142cf32b71eSErnst Schwab return master->transfer(spi, message); 2143cf32b71eSErnst Schwab } 2144cf32b71eSErnst Schwab 2145568d0697SDavid Brownell /** 2146568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 2147568d0697SDavid Brownell * @spi: device with which data will be exchanged 2148568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 2149568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 2150568d0697SDavid Brownell * 2151568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 2152568d0697SDavid Brownell * as well as from task contexts which can sleep. 2153568d0697SDavid Brownell * 2154568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 2155568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 2156568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 2157568d0697SDavid Brownell * indicate complete success) or a negative error code. 
After that 2158568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 2159568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 2160568d0697SDavid Brownell * core or controller driver code. 2161568d0697SDavid Brownell * 2162568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 2163568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 2164568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 2165568d0697SDavid Brownell * time requirements, for example. 2166568d0697SDavid Brownell * 2167568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 2168568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 2169568d0697SDavid Brownell * Until returning from the associated message completion callback, 2170568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 2171568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 2172568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 
2173568d0697SDavid Brownell */ 2174568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 2175568d0697SDavid Brownell { 2176568d0697SDavid Brownell struct spi_master *master = spi->master; 2177cf32b71eSErnst Schwab int ret; 2178cf32b71eSErnst Schwab unsigned long flags; 2179568d0697SDavid Brownell 218090808738SMark Brown ret = __spi_validate(spi, message); 218190808738SMark Brown if (ret != 0) 218290808738SMark Brown return ret; 218390808738SMark Brown 2184cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2185568d0697SDavid Brownell 2186cf32b71eSErnst Schwab if (master->bus_lock_flag) 2187cf32b71eSErnst Schwab ret = -EBUSY; 2188cf32b71eSErnst Schwab else 2189cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2190568d0697SDavid Brownell 2191cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2192cf32b71eSErnst Schwab 2193cf32b71eSErnst Schwab return ret; 2194568d0697SDavid Brownell } 2195568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 2196568d0697SDavid Brownell 2197cf32b71eSErnst Schwab /** 2198cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 2199cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 2200cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 2201cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 2202cf32b71eSErnst Schwab * 2203cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 2204cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 2205cf32b71eSErnst Schwab * 2206cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 2207cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 
2208cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 2209cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 2210cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 2211cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 2212cf32b71eSErnst Schwab * core or controller driver code. 2213cf32b71eSErnst Schwab * 2214cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 2215cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 2216cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 2217cf32b71eSErnst Schwab * time requirements, for example. 2218cf32b71eSErnst Schwab * 2219cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 2220cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 2221cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 2222cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 2223cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 2224cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 
2225cf32b71eSErnst Schwab */ 2226cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2227cf32b71eSErnst Schwab { 2228cf32b71eSErnst Schwab struct spi_master *master = spi->master; 2229cf32b71eSErnst Schwab int ret; 2230cf32b71eSErnst Schwab unsigned long flags; 2231cf32b71eSErnst Schwab 223290808738SMark Brown ret = __spi_validate(spi, message); 223390808738SMark Brown if (ret != 0) 223490808738SMark Brown return ret; 223590808738SMark Brown 2236cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2237cf32b71eSErnst Schwab 2238cf32b71eSErnst Schwab ret = __spi_async(spi, message); 2239cf32b71eSErnst Schwab 2240cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2241cf32b71eSErnst Schwab 2242cf32b71eSErnst Schwab return ret; 2243cf32b71eSErnst Schwab 2244cf32b71eSErnst Schwab } 2245cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 2246cf32b71eSErnst Schwab 22477d077197SDavid Brownell 22487d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 22497d077197SDavid Brownell 22507d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on 22517d077197SDavid Brownell * top of the core. Some other utility methods are defined as 22527d077197SDavid Brownell * inline functions. 
 */

/* Completion callback installed on messages submitted by __spi_sync();
 * wakes the on-stack completion the submitter is waiting on.
 */
static void spi_complete(void *arg)
{
	complete(arg);
}

/* Common implementation of the blocking transfer entry points.
 *
 * @bus_locked: nonzero when the caller already holds bus_lock_mutex
 *	(spi_sync_locked); zero when this function must take it itself
 *	(spi_sync).
 *
 * For masters using the core message queue (transfer ==
 * spi_queued_transfer) the message is queued directly under
 * bus_lock_spinlock and then pumped in the calling context; otherwise
 * it falls back to spi_async_locked() and relies on the driver's own
 * queue.  Either way this blocks until the message completes and
 * returns its final status.
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		/* Block until spi_complete() fires, then report the
		 * message's final status.
		 */
		wait_for_completion(&done);
		status = message->status;
	}
	/* Drop the pointer to the on-stack completion before returning */
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
Low-overhead controller 2356cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 2357cf32b71eSErnst Schwab * 2358cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 235925985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2360cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 2361cf32b71eSErnst Schwab * 2362cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 2363cf32b71eSErnst Schwab */ 2364cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2365cf32b71eSErnst Schwab { 2366cf32b71eSErnst Schwab return __spi_sync(spi, message, 1); 2367cf32b71eSErnst Schwab } 2368cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 2369cf32b71eSErnst Schwab 2370cf32b71eSErnst Schwab /** 2371cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2372cf32b71eSErnst Schwab * @master: SPI bus master that should be locked for exclusive bus access 2373cf32b71eSErnst Schwab * Context: can sleep 2374cf32b71eSErnst Schwab * 2375cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2376cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2377cf32b71eSErnst Schwab * 2378cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 2379cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2380cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 2381cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 2382cf32b71eSErnst Schwab * 2383cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 
2384cf32b71eSErnst Schwab */ 2385cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master) 2386cf32b71eSErnst Schwab { 2387cf32b71eSErnst Schwab unsigned long flags; 2388cf32b71eSErnst Schwab 2389cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 2390cf32b71eSErnst Schwab 2391cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2392cf32b71eSErnst Schwab master->bus_lock_flag = 1; 2393cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2394cf32b71eSErnst Schwab 2395cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 2396cf32b71eSErnst Schwab 2397cf32b71eSErnst Schwab return 0; 2398cf32b71eSErnst Schwab } 2399cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 2400cf32b71eSErnst Schwab 2401cf32b71eSErnst Schwab /** 2402cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 2403cf32b71eSErnst Schwab * @master: SPI bus master that was locked for exclusive bus access 2404cf32b71eSErnst Schwab * Context: can sleep 2405cf32b71eSErnst Schwab * 2406cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 2407cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 2408cf32b71eSErnst Schwab * 2409cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2410cf32b71eSErnst Schwab * call. 2411cf32b71eSErnst Schwab * 2412cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 
2413cf32b71eSErnst Schwab */ 2414cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 2415cf32b71eSErnst Schwab { 2416cf32b71eSErnst Schwab master->bus_lock_flag = 0; 2417cf32b71eSErnst Schwab 2418cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 2419cf32b71eSErnst Schwab 2420cf32b71eSErnst Schwab return 0; 2421cf32b71eSErnst Schwab } 2422cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 2423cf32b71eSErnst Schwab 2424a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 2425a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 24268ae12a0dSDavid Brownell 24278ae12a0dSDavid Brownell static u8 *buf; 24288ae12a0dSDavid Brownell 24298ae12a0dSDavid Brownell /** 24308ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 24318ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 24328ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 24338ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 243427570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 243527570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 243633e34dc6SDavid Brownell * Context: can sleep 24378ae12a0dSDavid Brownell * 24388ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 24398ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 24408ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 2441b885244eSDavid Brownell * This call may only be used from a context that may sleep. 24428ae12a0dSDavid Brownell * 24430c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 244433e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 
244533e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 24460c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 24478ae12a0dSDavid Brownell */ 24488ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 24490c4a1590SMark Brown const void *txbuf, unsigned n_tx, 24500c4a1590SMark Brown void *rxbuf, unsigned n_rx) 24518ae12a0dSDavid Brownell { 2452068f4070SDavid Brownell static DEFINE_MUTEX(lock); 24538ae12a0dSDavid Brownell 24548ae12a0dSDavid Brownell int status; 24558ae12a0dSDavid Brownell struct spi_message message; 2456bdff549eSDavid Brownell struct spi_transfer x[2]; 24578ae12a0dSDavid Brownell u8 *local_buf; 24588ae12a0dSDavid Brownell 2459b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 2460b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 2461b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 2462b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 
24638ae12a0dSDavid Brownell */ 2464b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 24652cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 24662cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 2467b3a223eeSMark Brown if (!local_buf) 2468b3a223eeSMark Brown return -ENOMEM; 2469b3a223eeSMark Brown } else { 2470b3a223eeSMark Brown local_buf = buf; 2471b3a223eeSMark Brown } 24728ae12a0dSDavid Brownell 24738275c642SVitaly Wool spi_message_init(&message); 24745fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 2475bdff549eSDavid Brownell if (n_tx) { 2476bdff549eSDavid Brownell x[0].len = n_tx; 2477bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 2478bdff549eSDavid Brownell } 2479bdff549eSDavid Brownell if (n_rx) { 2480bdff549eSDavid Brownell x[1].len = n_rx; 2481bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 2482bdff549eSDavid Brownell } 24838275c642SVitaly Wool 24848ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 2485bdff549eSDavid Brownell x[0].tx_buf = local_buf; 2486bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 24878ae12a0dSDavid Brownell 24888ae12a0dSDavid Brownell /* do the i/o */ 24898ae12a0dSDavid Brownell status = spi_sync(spi, &message); 24909b938b74SMarc Pignat if (status == 0) 2491bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 24928ae12a0dSDavid Brownell 2493bdff549eSDavid Brownell if (x[0].tx_buf == buf) 2494068f4070SDavid Brownell mutex_unlock(&lock); 24958ae12a0dSDavid Brownell else 24968ae12a0dSDavid Brownell kfree(local_buf); 24978ae12a0dSDavid Brownell 24988ae12a0dSDavid Brownell return status; 24998ae12a0dSDavid Brownell } 25008ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 25018ae12a0dSDavid Brownell 25028ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 25038ae12a0dSDavid Brownell 2504ce79d54aSPantelis Antoniou #if IS_ENABLED(CONFIG_OF_DYNAMIC) 2505ce79d54aSPantelis Antoniou static 
int __spi_of_device_match(struct device *dev, void *data)
{
	/* bus_find_device() match callback: compare against target OF node */
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

/* class_find_device() match callback: compare against target OF node */
static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not using spi_bus, so we find it with another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

/*
 * OF dynamic-reconfiguration notifier: registers/unregisters spi_devices
 * as their device_tree nodes are added or removed at runtime.
 */
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
2540ce79d54aSPantelis Antoniou struct of_reconfig_data *rd = arg; 2541ce79d54aSPantelis Antoniou struct spi_master *master; 2542ce79d54aSPantelis Antoniou struct spi_device *spi; 2543ce79d54aSPantelis Antoniou 2544ce79d54aSPantelis Antoniou switch (of_reconfig_get_state_change(action, arg)) { 2545ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_ADD: 2546ce79d54aSPantelis Antoniou master = of_find_spi_master_by_node(rd->dn->parent); 2547ce79d54aSPantelis Antoniou if (master == NULL) 2548ce79d54aSPantelis Antoniou return NOTIFY_OK; /* not for us */ 2549ce79d54aSPantelis Antoniou 2550ce79d54aSPantelis Antoniou spi = of_register_spi_device(master, rd->dn); 2551ce79d54aSPantelis Antoniou put_device(&master->dev); 2552ce79d54aSPantelis Antoniou 2553ce79d54aSPantelis Antoniou if (IS_ERR(spi)) { 2554ce79d54aSPantelis Antoniou pr_err("%s: failed to create for '%s'\n", 2555ce79d54aSPantelis Antoniou __func__, rd->dn->full_name); 2556ce79d54aSPantelis Antoniou return notifier_from_errno(PTR_ERR(spi)); 2557ce79d54aSPantelis Antoniou } 2558ce79d54aSPantelis Antoniou break; 2559ce79d54aSPantelis Antoniou 2560ce79d54aSPantelis Antoniou case OF_RECONFIG_CHANGE_REMOVE: 2561ce79d54aSPantelis Antoniou /* find our device by node */ 2562ce79d54aSPantelis Antoniou spi = of_find_spi_device_by_node(rd->dn); 2563ce79d54aSPantelis Antoniou if (spi == NULL) 2564ce79d54aSPantelis Antoniou return NOTIFY_OK; /* no? 
not meant for us */ 2565ce79d54aSPantelis Antoniou 2566ce79d54aSPantelis Antoniou /* unregister takes one ref away */ 2567ce79d54aSPantelis Antoniou spi_unregister_device(spi); 2568ce79d54aSPantelis Antoniou 2569ce79d54aSPantelis Antoniou /* and put the reference of the find */ 2570ce79d54aSPantelis Antoniou put_device(&spi->dev); 2571ce79d54aSPantelis Antoniou break; 2572ce79d54aSPantelis Antoniou } 2573ce79d54aSPantelis Antoniou 2574ce79d54aSPantelis Antoniou return NOTIFY_OK; 2575ce79d54aSPantelis Antoniou } 2576ce79d54aSPantelis Antoniou 2577ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = { 2578ce79d54aSPantelis Antoniou .notifier_call = of_spi_notify, 2579ce79d54aSPantelis Antoniou }; 2580ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2581ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier; 2582ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2583ce79d54aSPantelis Antoniou 25848ae12a0dSDavid Brownell static int __init spi_init(void) 25858ae12a0dSDavid Brownell { 2586b885244eSDavid Brownell int status; 25878ae12a0dSDavid Brownell 2588e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 2589b885244eSDavid Brownell if (!buf) { 2590b885244eSDavid Brownell status = -ENOMEM; 2591b885244eSDavid Brownell goto err0; 25928ae12a0dSDavid Brownell } 2593b885244eSDavid Brownell 2594b885244eSDavid Brownell status = bus_register(&spi_bus_type); 2595b885244eSDavid Brownell if (status < 0) 2596b885244eSDavid Brownell goto err1; 2597b885244eSDavid Brownell 2598b885244eSDavid Brownell status = class_register(&spi_master_class); 2599b885244eSDavid Brownell if (status < 0) 2600b885244eSDavid Brownell goto err2; 2601ce79d54aSPantelis Antoniou 26025267720eSFabio Estevam if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 2603ce79d54aSPantelis Antoniou WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 2604ce79d54aSPantelis Antoniou 2605b885244eSDavid Brownell return 0; 2606b885244eSDavid 
Brownell 2607b885244eSDavid Brownell err2: 2608b885244eSDavid Brownell bus_unregister(&spi_bus_type); 2609b885244eSDavid Brownell err1: 2610b885244eSDavid Brownell kfree(buf); 2611b885244eSDavid Brownell buf = NULL; 2612b885244eSDavid Brownell err0: 2613b885244eSDavid Brownell return status; 2614b885244eSDavid Brownell } 2615b885244eSDavid Brownell 26168ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 26178ae12a0dSDavid Brownell * but even essential drivers wait till later 2618b885244eSDavid Brownell * 2619b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 2620b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 2621b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 26228ae12a0dSDavid Brownell */ 2623673c0c00SDavid Brownell postcore_initcall(spi_init); 26248ae12a0dSDavid Brownell 2625