/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

/*
 * Release callback for a spi_device's embedded struct device: lets the
 * controller free any per-device state, drops the reference taken on the
 * master at allocation time, then frees the device itself.
 */
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	/* ACPI-enumerated devices report an ACPI-style modalias instead;
	 * -ENODEV means "not an ACPI device", so fall through.
	 */
	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	/* id tables are terminated by an entry with an empty name */
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then the driver's own id table, and finally a plain name match */
	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	/* ACPI devices get an ACPI-style MODALIAS; -ENODEV means not ACPI */
	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Legacy suspend path for spi_drivers that supply suspend()/resume()
 * methods instead of a dev_pm_ops structure.
 */
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

/* Each spi_pm_* helper prefers the driver's dev_pm_ops when present,
 * otherwise falls back on the legacy suspend/resume callbacks above.
 */
static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	/* ACPI power management is attached before probe and detached
	 * again if probe fails.
	 */
	acpi_dev_pm_attach(&spi->dev, true);
	ret = sdrv->probe(spi);
	if (ret)
		acpi_dev_pm_detach(&spi->dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = sdrv->remove(spi);
	acpi_dev_pm_detach(&spi->dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	/* only install the bus-level wrappers for callbacks the driver
	 * actually provides; NULL driver-model slots mean "not supported".
	 */
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;
	struct device		*dev = master->dev.parent;

	/* hold a reference on the master for the lifetime of the device;
	 * released in spidev_release() (or below on allocation failure).
	 */
	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	/* -ENOENT marks "no chip-select GPIO assigned yet" */
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/* Derive the device name: ACPI devices are named after their ACPI
 * companion, everything else gets "<master>.<chip_select>".
 */
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

/* bus_for_each_dev() callback: -EBUSY if an existing device already
 * occupies the same chipselect on the same master as @data.
 */
static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		/* drops the reference taken by spi_alloc_device() */
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
5142b9603a0SFeng Tang static void spi_match_master_to_boardinfo(struct spi_master *master, 5152b9603a0SFeng Tang struct spi_board_info *bi) 5162b9603a0SFeng Tang { 5172b9603a0SFeng Tang struct spi_device *dev; 5182b9603a0SFeng Tang 5192b9603a0SFeng Tang if (master->bus_num != bi->bus_num) 5202b9603a0SFeng Tang return; 5212b9603a0SFeng Tang 5222b9603a0SFeng Tang dev = spi_new_device(master, bi); 5232b9603a0SFeng Tang if (!dev) 5242b9603a0SFeng Tang dev_err(master->dev.parent, "can't create new device for %s\n", 5252b9603a0SFeng Tang bi->modalias); 5262b9603a0SFeng Tang } 5272b9603a0SFeng Tang 52833e34dc6SDavid Brownell /** 52933e34dc6SDavid Brownell * spi_register_board_info - register SPI devices for a given board 53033e34dc6SDavid Brownell * @info: array of chip descriptors 53133e34dc6SDavid Brownell * @n: how many descriptors are provided 53233e34dc6SDavid Brownell * Context: can sleep 53333e34dc6SDavid Brownell * 5348ae12a0dSDavid Brownell * Board-specific early init code calls this (probably during arch_initcall) 5358ae12a0dSDavid Brownell * with segments of the SPI device table. Any device nodes are created later, 5368ae12a0dSDavid Brownell * after the relevant parent SPI controller (bus_num) is defined. We keep 5378ae12a0dSDavid Brownell * this table of devices forever, so that reloading a controller driver will 5388ae12a0dSDavid Brownell * not make Linux forget about these hard-wired devices. 5398ae12a0dSDavid Brownell * 5408ae12a0dSDavid Brownell * Other code can also call this, e.g. a particular add-on board might provide 5418ae12a0dSDavid Brownell * SPI devices through its expansion connector, so code initializing that board 5428ae12a0dSDavid Brownell * would naturally declare its SPI devices. 5438ae12a0dSDavid Brownell * 5448ae12a0dSDavid Brownell * The board info passed can safely be __initdata ... but be careful of 5458ae12a0dSDavid Brownell * any embedded pointers (platform_data, etc), they're copied as-is. 
5468ae12a0dSDavid Brownell */ 547fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n) 5488ae12a0dSDavid Brownell { 5498ae12a0dSDavid Brownell struct boardinfo *bi; 5502b9603a0SFeng Tang int i; 5518ae12a0dSDavid Brownell 5522b9603a0SFeng Tang bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 5538ae12a0dSDavid Brownell if (!bi) 5548ae12a0dSDavid Brownell return -ENOMEM; 5558ae12a0dSDavid Brownell 5562b9603a0SFeng Tang for (i = 0; i < n; i++, bi++, info++) { 5572b9603a0SFeng Tang struct spi_master *master; 5582b9603a0SFeng Tang 5592b9603a0SFeng Tang memcpy(&bi->board_info, info, sizeof(*info)); 56094040828SMatthias Kaehlcke mutex_lock(&board_lock); 5618ae12a0dSDavid Brownell list_add_tail(&bi->list, &board_list); 5622b9603a0SFeng Tang list_for_each_entry(master, &spi_master_list, list) 5632b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 56494040828SMatthias Kaehlcke mutex_unlock(&board_lock); 5652b9603a0SFeng Tang } 5662b9603a0SFeng Tang 5678ae12a0dSDavid Brownell return 0; 5688ae12a0dSDavid Brownell } 5698ae12a0dSDavid Brownell 5708ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 5718ae12a0dSDavid Brownell 572b158935fSMark Brown static void spi_set_cs(struct spi_device *spi, bool enable) 573b158935fSMark Brown { 574b158935fSMark Brown if (spi->mode & SPI_CS_HIGH) 575b158935fSMark Brown enable = !enable; 576b158935fSMark Brown 577b158935fSMark Brown if (spi->cs_gpio >= 0) 578b158935fSMark Brown gpio_set_value(spi->cs_gpio, !enable); 579b158935fSMark Brown else if (spi->master->set_cs) 580b158935fSMark Brown spi->master->set_cs(spi, !enable); 581b158935fSMark Brown } 582b158935fSMark Brown 583b158935fSMark Brown /* 584b158935fSMark Brown * spi_transfer_one_message - Default implementation of transfer_one_message() 585b158935fSMark Brown * 586b158935fSMark Brown * This is a standard implementation of transfer_one_message() for 587b158935fSMark Brown * 
drivers which impelment a transfer_one() operation. It provides 588b158935fSMark Brown * standard handling of delays and chip select management. 589b158935fSMark Brown */ 590b158935fSMark Brown static int spi_transfer_one_message(struct spi_master *master, 591b158935fSMark Brown struct spi_message *msg) 592b158935fSMark Brown { 593b158935fSMark Brown struct spi_transfer *xfer; 594b158935fSMark Brown bool keep_cs = false; 595b158935fSMark Brown int ret = 0; 596b158935fSMark Brown 597b158935fSMark Brown spi_set_cs(msg->spi, true); 598b158935fSMark Brown 599b158935fSMark Brown list_for_each_entry(xfer, &msg->transfers, transfer_list) { 600b158935fSMark Brown trace_spi_transfer_start(msg, xfer); 601b158935fSMark Brown 60216735d02SWolfram Sang reinit_completion(&master->xfer_completion); 603b158935fSMark Brown 604b158935fSMark Brown ret = master->transfer_one(master, msg->spi, xfer); 605b158935fSMark Brown if (ret < 0) { 606b158935fSMark Brown dev_err(&msg->spi->dev, 607b158935fSMark Brown "SPI transfer failed: %d\n", ret); 608b158935fSMark Brown goto out; 609b158935fSMark Brown } 610b158935fSMark Brown 61113a42798SAxel Lin if (ret > 0) { 61213a42798SAxel Lin ret = 0; 613b158935fSMark Brown wait_for_completion(&master->xfer_completion); 61413a42798SAxel Lin } 615b158935fSMark Brown 616b158935fSMark Brown trace_spi_transfer_stop(msg, xfer); 617b158935fSMark Brown 618b158935fSMark Brown if (msg->status != -EINPROGRESS) 619b158935fSMark Brown goto out; 620b158935fSMark Brown 621b158935fSMark Brown if (xfer->delay_usecs) 622b158935fSMark Brown udelay(xfer->delay_usecs); 623b158935fSMark Brown 624b158935fSMark Brown if (xfer->cs_change) { 625b158935fSMark Brown if (list_is_last(&xfer->transfer_list, 626b158935fSMark Brown &msg->transfers)) { 627b158935fSMark Brown keep_cs = true; 628b158935fSMark Brown } else { 629*0b73aa63SMark Brown spi_set_cs(msg->spi, false); 630*0b73aa63SMark Brown udelay(10); 631*0b73aa63SMark Brown spi_set_cs(msg->spi, true); 632b158935fSMark Brown } 
633b158935fSMark Brown } 634b158935fSMark Brown 635b158935fSMark Brown msg->actual_length += xfer->len; 636b158935fSMark Brown } 637b158935fSMark Brown 638b158935fSMark Brown out: 639b158935fSMark Brown if (ret != 0 || !keep_cs) 640b158935fSMark Brown spi_set_cs(msg->spi, false); 641b158935fSMark Brown 642b158935fSMark Brown if (msg->status == -EINPROGRESS) 643b158935fSMark Brown msg->status = ret; 644b158935fSMark Brown 645b158935fSMark Brown spi_finalize_current_message(master); 646b158935fSMark Brown 647b158935fSMark Brown return ret; 648b158935fSMark Brown } 649b158935fSMark Brown 650b158935fSMark Brown /** 651b158935fSMark Brown * spi_finalize_current_transfer - report completion of a transfer 652b158935fSMark Brown * 653b158935fSMark Brown * Called by SPI drivers using the core transfer_one_message() 654b158935fSMark Brown * implementation to notify it that the current interrupt driven 6559e8f4882SGeert Uytterhoeven * transfer has finished and the next one may be scheduled. 656b158935fSMark Brown */ 657b158935fSMark Brown void spi_finalize_current_transfer(struct spi_master *master) 658b158935fSMark Brown { 659b158935fSMark Brown complete(&master->xfer_completion); 660b158935fSMark Brown } 661b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 662b158935fSMark Brown 663ffbbdd21SLinus Walleij /** 664ffbbdd21SLinus Walleij * spi_pump_messages - kthread work function which processes spi message queue 665ffbbdd21SLinus Walleij * @work: pointer to kthread work struct contained in the master struct 666ffbbdd21SLinus Walleij * 667ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that 668ffbbdd21SLinus Walleij * needs processing and if so call out to the driver to initialize hardware 669ffbbdd21SLinus Walleij * and transfer each message. 
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		/* Queue drained (or stopped): tear down if we were busy */
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		/* Drop the runtime-PM reference taken when we went busy */
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	/*
	 * NOTE(review): on pm_runtime_get_sync() failure we return with
	 * cur_msg still set and busy == true, which looks like it would
	 * stall the queue — confirm this error path is intended.
	 */
	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	/* Optional per-message driver preparation before the transfer */
	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue: %d\n", ret);
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}
}
7512841a5fcSMark Brown } 7522841a5fcSMark Brown master->cur_msg_prepared = true; 7532841a5fcSMark Brown } 7542841a5fcSMark Brown 755ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 756ffbbdd21SLinus Walleij if (ret) { 757ffbbdd21SLinus Walleij dev_err(&master->dev, 758e120cc0dSDaniel Santos "failed to transfer one message from queue: %d\n", ret); 759e120cc0dSDaniel Santos master->cur_msg->status = ret; 760e120cc0dSDaniel Santos spi_finalize_current_message(master); 761ffbbdd21SLinus Walleij return; 762ffbbdd21SLinus Walleij } 763ffbbdd21SLinus Walleij } 764ffbbdd21SLinus Walleij 765ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master) 766ffbbdd21SLinus Walleij { 767ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 768ffbbdd21SLinus Walleij 769ffbbdd21SLinus Walleij INIT_LIST_HEAD(&master->queue); 770ffbbdd21SLinus Walleij spin_lock_init(&master->queue_lock); 771ffbbdd21SLinus Walleij 772ffbbdd21SLinus Walleij master->running = false; 773ffbbdd21SLinus Walleij master->busy = false; 774ffbbdd21SLinus Walleij 775ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 776ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 777f170168bSKees Cook &master->kworker, "%s", 778ffbbdd21SLinus Walleij dev_name(&master->dev)); 779ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 780ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 781ffbbdd21SLinus Walleij return -ENOMEM; 782ffbbdd21SLinus Walleij } 783ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 784ffbbdd21SLinus Walleij 785ffbbdd21SLinus Walleij /* 786ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 787ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 788ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 
789ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 790ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 791ffbbdd21SLinus Walleij */ 792ffbbdd21SLinus Walleij if (master->rt) { 793ffbbdd21SLinus Walleij dev_info(&master->dev, 794ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 795ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 796ffbbdd21SLinus Walleij } 797ffbbdd21SLinus Walleij 798ffbbdd21SLinus Walleij return 0; 799ffbbdd21SLinus Walleij } 800ffbbdd21SLinus Walleij 801ffbbdd21SLinus Walleij /** 802ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 803ffbbdd21SLinus Walleij * messages 804ffbbdd21SLinus Walleij * @master: the master to check for queued messages 805ffbbdd21SLinus Walleij * 806ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 807ffbbdd21SLinus Walleij * this call. 
808ffbbdd21SLinus Walleij */ 809ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 810ffbbdd21SLinus Walleij { 811ffbbdd21SLinus Walleij struct spi_message *next; 812ffbbdd21SLinus Walleij unsigned long flags; 813ffbbdd21SLinus Walleij 814ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 815ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 8161cfd97f9SAxel Lin next = list_first_entry_or_null(&master->queue, struct spi_message, 8171cfd97f9SAxel Lin queue); 818ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 819ffbbdd21SLinus Walleij 820ffbbdd21SLinus Walleij return next; 821ffbbdd21SLinus Walleij } 822ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 823ffbbdd21SLinus Walleij 824ffbbdd21SLinus Walleij /** 825ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 826ffbbdd21SLinus Walleij * @master: the master to return the message to 827ffbbdd21SLinus Walleij * 828ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 829ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue. 
/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	/* Detach the message under the lock and kick the pump for the next */
	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	/* Undo any prepare_message() work before reporting completion */
	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	/* Invoke the submitter's completion callback, if one was given */
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
872ffbbdd21SLinus Walleij master->running = true; 873ffbbdd21SLinus Walleij master->cur_msg = NULL; 874ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 875ffbbdd21SLinus Walleij 876ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 877ffbbdd21SLinus Walleij 878ffbbdd21SLinus Walleij return 0; 879ffbbdd21SLinus Walleij } 880ffbbdd21SLinus Walleij 881ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 882ffbbdd21SLinus Walleij { 883ffbbdd21SLinus Walleij unsigned long flags; 884ffbbdd21SLinus Walleij unsigned limit = 500; 885ffbbdd21SLinus Walleij int ret = 0; 886ffbbdd21SLinus Walleij 887ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 888ffbbdd21SLinus Walleij 889ffbbdd21SLinus Walleij /* 890ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 891ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 892ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 893ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 
894ffbbdd21SLinus Walleij */ 895ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 896ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 897ffbbdd21SLinus Walleij msleep(10); 898ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 899ffbbdd21SLinus Walleij } 900ffbbdd21SLinus Walleij 901ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 902ffbbdd21SLinus Walleij ret = -EBUSY; 903ffbbdd21SLinus Walleij else 904ffbbdd21SLinus Walleij master->running = false; 905ffbbdd21SLinus Walleij 906ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 907ffbbdd21SLinus Walleij 908ffbbdd21SLinus Walleij if (ret) { 909ffbbdd21SLinus Walleij dev_warn(&master->dev, 910ffbbdd21SLinus Walleij "could not stop message queue\n"); 911ffbbdd21SLinus Walleij return ret; 912ffbbdd21SLinus Walleij } 913ffbbdd21SLinus Walleij return ret; 914ffbbdd21SLinus Walleij } 915ffbbdd21SLinus Walleij 916ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 917ffbbdd21SLinus Walleij { 918ffbbdd21SLinus Walleij int ret; 919ffbbdd21SLinus Walleij 920ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 921ffbbdd21SLinus Walleij 922ffbbdd21SLinus Walleij /* 923ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 924ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 925ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 926ffbbdd21SLinus Walleij * return anyway. 
927ffbbdd21SLinus Walleij */ 928ffbbdd21SLinus Walleij if (ret) { 929ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 930ffbbdd21SLinus Walleij return ret; 931ffbbdd21SLinus Walleij } 932ffbbdd21SLinus Walleij 933ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 934ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 935ffbbdd21SLinus Walleij 936ffbbdd21SLinus Walleij return 0; 937ffbbdd21SLinus Walleij } 938ffbbdd21SLinus Walleij 939ffbbdd21SLinus Walleij /** 940ffbbdd21SLinus Walleij * spi_queued_transfer - transfer function for queued transfers 941ffbbdd21SLinus Walleij * @spi: spi device which is requesting transfer 942ffbbdd21SLinus Walleij * @msg: spi message which is to handled is queued to driver queue 943ffbbdd21SLinus Walleij */ 944ffbbdd21SLinus Walleij static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 945ffbbdd21SLinus Walleij { 946ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 947ffbbdd21SLinus Walleij unsigned long flags; 948ffbbdd21SLinus Walleij 949ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 950ffbbdd21SLinus Walleij 951ffbbdd21SLinus Walleij if (!master->running) { 952ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 953ffbbdd21SLinus Walleij return -ESHUTDOWN; 954ffbbdd21SLinus Walleij } 955ffbbdd21SLinus Walleij msg->actual_length = 0; 956ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 957ffbbdd21SLinus Walleij 958ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 95996b3eaceSAxel Lin if (!master->busy) 960ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 961ffbbdd21SLinus Walleij 962ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 963ffbbdd21SLinus Walleij return 0; 964ffbbdd21SLinus Walleij } 965ffbbdd21SLinus Walleij 966ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 
967ffbbdd21SLinus Walleij { 968ffbbdd21SLinus Walleij int ret; 969ffbbdd21SLinus Walleij 970ffbbdd21SLinus Walleij master->queued = true; 971ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 972b158935fSMark Brown if (!master->transfer_one_message) 973b158935fSMark Brown master->transfer_one_message = spi_transfer_one_message; 974ffbbdd21SLinus Walleij 975ffbbdd21SLinus Walleij /* Initialize and start queue */ 976ffbbdd21SLinus Walleij ret = spi_init_queue(master); 977ffbbdd21SLinus Walleij if (ret) { 978ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 979ffbbdd21SLinus Walleij goto err_init_queue; 980ffbbdd21SLinus Walleij } 981ffbbdd21SLinus Walleij ret = spi_start_queue(master); 982ffbbdd21SLinus Walleij if (ret) { 983ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 984ffbbdd21SLinus Walleij goto err_start_queue; 985ffbbdd21SLinus Walleij } 986ffbbdd21SLinus Walleij 987ffbbdd21SLinus Walleij return 0; 988ffbbdd21SLinus Walleij 989ffbbdd21SLinus Walleij err_start_queue: 990ffbbdd21SLinus Walleij err_init_queue: 991ffbbdd21SLinus Walleij spi_destroy_queue(master); 992ffbbdd21SLinus Walleij return ret; 993ffbbdd21SLinus Walleij } 994ffbbdd21SLinus Walleij 995ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 996ffbbdd21SLinus Walleij 9977cb94361SAndreas Larsson #if defined(CONFIG_OF) 998d57a4282SGrant Likely /** 999d57a4282SGrant Likely * of_register_spi_devices() - Register child devices onto the SPI bus 1000d57a4282SGrant Likely * @master: Pointer to spi_master device 1001d57a4282SGrant Likely * 1002d57a4282SGrant Likely * Registers an spi_device for each child node of master node which has a 'reg' 1003d57a4282SGrant Likely * property. 
1004d57a4282SGrant Likely */ 1005d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) 1006d57a4282SGrant Likely { 1007d57a4282SGrant Likely struct spi_device *spi; 1008d57a4282SGrant Likely struct device_node *nc; 1009d57a4282SGrant Likely int rc; 101089da4293STrent Piepho u32 value; 1011d57a4282SGrant Likely 1012d57a4282SGrant Likely if (!master->dev.of_node) 1013d57a4282SGrant Likely return; 1014d57a4282SGrant Likely 1015f3b6159eSAlexander Sverdlin for_each_available_child_of_node(master->dev.of_node, nc) { 1016d57a4282SGrant Likely /* Alloc an spi_device */ 1017d57a4282SGrant Likely spi = spi_alloc_device(master); 1018d57a4282SGrant Likely if (!spi) { 1019d57a4282SGrant Likely dev_err(&master->dev, "spi_device alloc error for %s\n", 1020d57a4282SGrant Likely nc->full_name); 1021d57a4282SGrant Likely spi_dev_put(spi); 1022d57a4282SGrant Likely continue; 1023d57a4282SGrant Likely } 1024d57a4282SGrant Likely 1025d57a4282SGrant Likely /* Select device driver */ 1026d57a4282SGrant Likely if (of_modalias_node(nc, spi->modalias, 1027d57a4282SGrant Likely sizeof(spi->modalias)) < 0) { 1028d57a4282SGrant Likely dev_err(&master->dev, "cannot find modalias for %s\n", 1029d57a4282SGrant Likely nc->full_name); 1030d57a4282SGrant Likely spi_dev_put(spi); 1031d57a4282SGrant Likely continue; 1032d57a4282SGrant Likely } 1033d57a4282SGrant Likely 1034d57a4282SGrant Likely /* Device address */ 103589da4293STrent Piepho rc = of_property_read_u32(nc, "reg", &value); 103689da4293STrent Piepho if (rc) { 103789da4293STrent Piepho dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 103889da4293STrent Piepho nc->full_name, rc); 1039d57a4282SGrant Likely spi_dev_put(spi); 1040d57a4282SGrant Likely continue; 1041d57a4282SGrant Likely } 104289da4293STrent Piepho spi->chip_select = value; 1043d57a4282SGrant Likely 1044d57a4282SGrant Likely /* Mode (clock phase/polarity/etc.) 
*/ 1045d57a4282SGrant Likely if (of_find_property(nc, "spi-cpha", NULL)) 1046d57a4282SGrant Likely spi->mode |= SPI_CPHA; 1047d57a4282SGrant Likely if (of_find_property(nc, "spi-cpol", NULL)) 1048d57a4282SGrant Likely spi->mode |= SPI_CPOL; 1049d57a4282SGrant Likely if (of_find_property(nc, "spi-cs-high", NULL)) 1050d57a4282SGrant Likely spi->mode |= SPI_CS_HIGH; 1051c20151dfSLars-Peter Clausen if (of_find_property(nc, "spi-3wire", NULL)) 1052c20151dfSLars-Peter Clausen spi->mode |= SPI_3WIRE; 1053d57a4282SGrant Likely 1054f477b7fbSwangyuhang /* Device DUAL/QUAD mode */ 105589da4293STrent Piepho if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 105689da4293STrent Piepho switch (value) { 105789da4293STrent Piepho case 1: 1058f477b7fbSwangyuhang break; 105989da4293STrent Piepho case 2: 1060f477b7fbSwangyuhang spi->mode |= SPI_TX_DUAL; 1061f477b7fbSwangyuhang break; 106289da4293STrent Piepho case 4: 1063f477b7fbSwangyuhang spi->mode |= SPI_TX_QUAD; 1064f477b7fbSwangyuhang break; 1065f477b7fbSwangyuhang default: 1066a822e99cSMark Brown dev_err(&master->dev, 1067a110f93dSwangyuhang "spi-tx-bus-width %d not supported\n", 106889da4293STrent Piepho value); 1069f477b7fbSwangyuhang spi_dev_put(spi); 1070f477b7fbSwangyuhang continue; 1071f477b7fbSwangyuhang } 1072a822e99cSMark Brown } 1073f477b7fbSwangyuhang 107489da4293STrent Piepho if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 107589da4293STrent Piepho switch (value) { 107689da4293STrent Piepho case 1: 1077f477b7fbSwangyuhang break; 107889da4293STrent Piepho case 2: 1079f477b7fbSwangyuhang spi->mode |= SPI_RX_DUAL; 1080f477b7fbSwangyuhang break; 108189da4293STrent Piepho case 4: 1082f477b7fbSwangyuhang spi->mode |= SPI_RX_QUAD; 1083f477b7fbSwangyuhang break; 1084f477b7fbSwangyuhang default: 1085a822e99cSMark Brown dev_err(&master->dev, 1086a110f93dSwangyuhang "spi-rx-bus-width %d not supported\n", 108789da4293STrent Piepho value); 1088f477b7fbSwangyuhang spi_dev_put(spi); 1089f477b7fbSwangyuhang 
/*
 * acpi_spi_add_resource - resource-walk callback that fills in an spi_device
 * @ares: current ACPI resource being examined
 * @data: the struct spi_device being populated
 *
 * Copies SPI serial-bus parameters (chip select, speed, mode bits) and the
 * first interrupt resource into @data.  Always returns 1 so the ACPI core
 * skips adding the resource itself.
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			/* Translate ACPI clock/polarity flags to SPI mode bits */
			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		/* Take only the first interrupt resource (irq still unset) */
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
/*
 * acpi_spi_add_device - namespace-walk callback that registers one SPI slave
 * @handle: ACPI handle of the candidate device node
 * @level: namespace depth (unused)
 * @data: the struct spi_master to register against
 * @return_value: unused
 *
 * Allocates an spi_device, fills it from the node's ACPI resources and
 * registers it.  Nodes that are absent, unresolvable, or lack a usable
 * SPI serial-bus resource are silently skipped (AE_OK keeps the walk going).
 */
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	/* Negative irq lets acpi_spi_add_resource() claim the first one */
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	/* Skip nodes whose resources did not yield a usable SPI config */
	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		/* Roll back the PM flag and drop our reference on failure */
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}
Westerberg return AE_OK; 116964bee4d2SMika Westerberg 117064bee4d2SMika Westerberg spi = spi_alloc_device(master); 117164bee4d2SMika Westerberg if (!spi) { 117264bee4d2SMika Westerberg dev_err(&master->dev, "failed to allocate SPI device for %s\n", 117364bee4d2SMika Westerberg dev_name(&adev->dev)); 117464bee4d2SMika Westerberg return AE_NO_MEMORY; 117564bee4d2SMika Westerberg } 117664bee4d2SMika Westerberg 11777b199811SRafael J. Wysocki ACPI_COMPANION_SET(&spi->dev, adev); 117864bee4d2SMika Westerberg spi->irq = -1; 117964bee4d2SMika Westerberg 118064bee4d2SMika Westerberg INIT_LIST_HEAD(&resource_list); 118164bee4d2SMika Westerberg ret = acpi_dev_get_resources(adev, &resource_list, 118264bee4d2SMika Westerberg acpi_spi_add_resource, spi); 118364bee4d2SMika Westerberg acpi_dev_free_resource_list(&resource_list); 118464bee4d2SMika Westerberg 118564bee4d2SMika Westerberg if (ret < 0 || !spi->max_speed_hz) { 118664bee4d2SMika Westerberg spi_dev_put(spi); 118764bee4d2SMika Westerberg return AE_OK; 118864bee4d2SMika Westerberg } 118964bee4d2SMika Westerberg 119033cf00e5SMika Westerberg adev->power.flags.ignore_parent = true; 1191cf9eb39cSJarkko Nikula strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias)); 119264bee4d2SMika Westerberg if (spi_add_device(spi)) { 119333cf00e5SMika Westerberg adev->power.flags.ignore_parent = false; 119464bee4d2SMika Westerberg dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 119564bee4d2SMika Westerberg dev_name(&adev->dev)); 119664bee4d2SMika Westerberg spi_dev_put(spi); 119764bee4d2SMika Westerberg } 119864bee4d2SMika Westerberg 119964bee4d2SMika Westerberg return AE_OK; 120064bee4d2SMika Westerberg } 120164bee4d2SMika Westerberg 120264bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master) 120364bee4d2SMika Westerberg { 120464bee4d2SMika Westerberg acpi_status status; 120564bee4d2SMika Westerberg acpi_handle handle; 120664bee4d2SMika Westerberg 120729896178SRafael J. 
/*
 * spi_master_release - class release callback for spi_master devices
 *
 * Frees the spi_master (and its trailing driver-private data, which was
 * allocated in the same kzalloc in spi_alloc_master) when the last
 * reference to its embedded struct device is dropped.
 */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	/* One allocation covers the master plus @size bytes of private data */
	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;		/* -1 requests dynamic bus numbering */
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	/* Private data lives directly after the spi_master structure */
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master); 12788ae12a0dSDavid Brownell 127974317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF 128074317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 128174317984SJean-Christophe PLAGNIOL-VILLARD { 1282e80beb27SGrant Likely int nb, i, *cs; 128374317984SJean-Christophe PLAGNIOL-VILLARD struct device_node *np = master->dev.of_node; 128474317984SJean-Christophe PLAGNIOL-VILLARD 128574317984SJean-Christophe PLAGNIOL-VILLARD if (!np) 128674317984SJean-Christophe PLAGNIOL-VILLARD return 0; 128774317984SJean-Christophe PLAGNIOL-VILLARD 128874317984SJean-Christophe PLAGNIOL-VILLARD nb = of_gpio_named_count(np, "cs-gpios"); 12895fe5f05eSJingoo Han master->num_chipselect = max_t(int, nb, master->num_chipselect); 129074317984SJean-Christophe PLAGNIOL-VILLARD 12918ec5d84eSAndreas Larsson /* Return error only for an incorrectly formed cs-gpios property */ 12928ec5d84eSAndreas Larsson if (nb == 0 || nb == -ENOENT) 129374317984SJean-Christophe PLAGNIOL-VILLARD return 0; 12948ec5d84eSAndreas Larsson else if (nb < 0) 12958ec5d84eSAndreas Larsson return nb; 129674317984SJean-Christophe PLAGNIOL-VILLARD 129774317984SJean-Christophe PLAGNIOL-VILLARD cs = devm_kzalloc(&master->dev, 129874317984SJean-Christophe PLAGNIOL-VILLARD sizeof(int) * master->num_chipselect, 129974317984SJean-Christophe PLAGNIOL-VILLARD GFP_KERNEL); 130074317984SJean-Christophe PLAGNIOL-VILLARD master->cs_gpios = cs; 130174317984SJean-Christophe PLAGNIOL-VILLARD 130274317984SJean-Christophe PLAGNIOL-VILLARD if (!master->cs_gpios) 130374317984SJean-Christophe PLAGNIOL-VILLARD return -ENOMEM; 130474317984SJean-Christophe PLAGNIOL-VILLARD 13050da83bb1SAndreas Larsson for (i = 0; i < master->num_chipselect; i++) 1306446411e1SAndreas Larsson cs[i] = -ENOENT; 130774317984SJean-Christophe PLAGNIOL-VILLARD 130874317984SJean-Christophe PLAGNIOL-VILLARD for (i = 0; i < nb; i++) 130974317984SJean-Christophe PLAGNIOL-VILLARD cs[i] = 
of_get_named_gpio(np, "cs-gpios", i); 131074317984SJean-Christophe PLAGNIOL-VILLARD 131174317984SJean-Christophe PLAGNIOL-VILLARD return 0; 131274317984SJean-Christophe PLAGNIOL-VILLARD } 131374317984SJean-Christophe PLAGNIOL-VILLARD #else 131474317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master) 131574317984SJean-Christophe PLAGNIOL-VILLARD { 131674317984SJean-Christophe PLAGNIOL-VILLARD return 0; 131774317984SJean-Christophe PLAGNIOL-VILLARD } 131874317984SJean-Christophe PLAGNIOL-VILLARD #endif 131974317984SJean-Christophe PLAGNIOL-VILLARD 13208ae12a0dSDavid Brownell /** 13218ae12a0dSDavid Brownell * spi_register_master - register SPI master controller 13228ae12a0dSDavid Brownell * @master: initialized master, originally from spi_alloc_master() 132333e34dc6SDavid Brownell * Context: can sleep 13248ae12a0dSDavid Brownell * 13258ae12a0dSDavid Brownell * SPI master controllers connect to their drivers using some non-SPI bus, 13268ae12a0dSDavid Brownell * such as the platform bus. The final stage of probe() in that code 13278ae12a0dSDavid Brownell * includes calling spi_register_master() to hook up to this SPI bus glue. 13288ae12a0dSDavid Brownell * 13298ae12a0dSDavid Brownell * SPI controllers use board specific (often SOC specific) bus numbers, 13308ae12a0dSDavid Brownell * and board-specific addressing for SPI devices combines those numbers 13318ae12a0dSDavid Brownell * with chip select numbers. Since SPI does not directly support dynamic 13328ae12a0dSDavid Brownell * device identification, boards need configuration tables telling which 13338ae12a0dSDavid Brownell * chip is at which address. 13348ae12a0dSDavid Brownell * 13358ae12a0dSDavid Brownell * This must be called from context that can sleep. It returns zero on 13368ae12a0dSDavid Brownell * success, else a negative error code (dropping the master's refcount). 
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	/* dynamic bus numbers are handed out from the top of the range down */
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* a DT "spi" alias, when present, picks the bus number */
	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			/* undo device_add() before reporting the failure */
			device_del(&master->dev);
			goto done;
		}
	}

	/* make the master visible to board-info matching before scanning */
	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

/* devres release callback: drop the master registered by
 * devm_spi_register_master() when the managing device goes away.
 */
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

/* device_for_each_child() callback: unregister one child spi_device */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	/* unregister all child spi_devices before the master itself */
	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

/* Stop the master's message queue for system suspend; returns 0 or the
 * spi_stop_queue() error.
 */
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

/* Restart the master's message queue on resume; no-op for non-queued
 * masters, otherwise returns the spi_start_queue() result.
 */
int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

/* class_find_device() match callback: true when the master's bus number
 * equals the u16 pointed to by @data.
 */
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits;
	int		status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	/* the controller driver's setup() hook is optional */
	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

/* Validate and normalize @message before it is queued: rejects malformed
 * messages and fills in per-transfer defaults from the spi_device.
 * Returns 0 when the message is acceptable, else -EINVAL.
 */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;

	if (list_empty(&message->transfers))
		return -EINVAL;
	if (!message->complete)
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* a half-duplex transfer cannot go both ways at once */
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;
		if (!xfer->speed_hz) {
			xfer->speed_hz = spi->max_speed_hz;
			/* clamp the inherited default to the controller cap */
			if (master->max_speed_hz &&
			    xfer->speed_hz > master->max_speed_hz)
				xfer->speed_hz = master->max_speed_hz;
		}

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/* an explicitly requested speed must be within controller
		 * limits; the zero "use default" value is handled above
		 */
		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;
		if (xfer->speed_hz && master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

/* Hand a validated message to the controller driver.  Callers hold
 * bus_lock_spinlock; the transfer() hook must not sleep.
 */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);
	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	/* bus_lock_flag is set while a client holds the bus exclusively
	 * via spi_bus_lock(); such clients must use spi_async_locked()
	 */
	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	/* no bus_lock_flag check: the caller already owns the bus lock */
	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;

}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

/* Completion callback used by __spi_sync(): wake the sleeping caller */
static void spi_complete(void *arg)
{
	complete(arg);
}

/* Shared implementation of spi_sync() and spi_sync_locked().
 * @bus_locked says whether the caller already holds bus_lock_mutex;
 * when it doesn't, the mutex is taken around the async submission so
 * the message cannot interleave with a spi_bus_lock() critical section.
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	/* only wait if the submission was accepted; the completion then
	 * carries the final per-message status
	 */
	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	/* publish the flag under the spinlock so spi_async() sees it */
	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
1964cf32b71eSErnst Schwab */ 1965cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 1966cf32b71eSErnst Schwab { 1967cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1968cf32b71eSErnst Schwab 1969cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 1970cf32b71eSErnst Schwab 1971cf32b71eSErnst Schwab return 0; 1972cf32b71eSErnst Schwab } 1973cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 1974cf32b71eSErnst Schwab 1975a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 1976a9948b61SDavid Brownell #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 19778ae12a0dSDavid Brownell 19788ae12a0dSDavid Brownell static u8 *buf; 19798ae12a0dSDavid Brownell 19808ae12a0dSDavid Brownell /** 19818ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 19828ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 19838ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 19848ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 198527570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 198627570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 198733e34dc6SDavid Brownell * Context: can sleep 19888ae12a0dSDavid Brownell * 19898ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 19908ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 19918ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 1992b885244eSDavid Brownell * This call may only be used from a context that may sleep. 19938ae12a0dSDavid Brownell * 19940c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 199533e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 
199633e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 19970c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 19988ae12a0dSDavid Brownell */ 19998ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 20000c4a1590SMark Brown const void *txbuf, unsigned n_tx, 20010c4a1590SMark Brown void *rxbuf, unsigned n_rx) 20028ae12a0dSDavid Brownell { 2003068f4070SDavid Brownell static DEFINE_MUTEX(lock); 20048ae12a0dSDavid Brownell 20058ae12a0dSDavid Brownell int status; 20068ae12a0dSDavid Brownell struct spi_message message; 2007bdff549eSDavid Brownell struct spi_transfer x[2]; 20088ae12a0dSDavid Brownell u8 *local_buf; 20098ae12a0dSDavid Brownell 2010b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 2011b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 2012b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 2013b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 
20148ae12a0dSDavid Brownell */ 2015b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 20162cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 20172cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 2018b3a223eeSMark Brown if (!local_buf) 2019b3a223eeSMark Brown return -ENOMEM; 2020b3a223eeSMark Brown } else { 2021b3a223eeSMark Brown local_buf = buf; 2022b3a223eeSMark Brown } 20238ae12a0dSDavid Brownell 20248275c642SVitaly Wool spi_message_init(&message); 20255fe5f05eSJingoo Han memset(x, 0, sizeof(x)); 2026bdff549eSDavid Brownell if (n_tx) { 2027bdff549eSDavid Brownell x[0].len = n_tx; 2028bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 2029bdff549eSDavid Brownell } 2030bdff549eSDavid Brownell if (n_rx) { 2031bdff549eSDavid Brownell x[1].len = n_rx; 2032bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 2033bdff549eSDavid Brownell } 20348275c642SVitaly Wool 20358ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 2036bdff549eSDavid Brownell x[0].tx_buf = local_buf; 2037bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 20388ae12a0dSDavid Brownell 20398ae12a0dSDavid Brownell /* do the i/o */ 20408ae12a0dSDavid Brownell status = spi_sync(spi, &message); 20419b938b74SMarc Pignat if (status == 0) 2042bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 20438ae12a0dSDavid Brownell 2044bdff549eSDavid Brownell if (x[0].tx_buf == buf) 2045068f4070SDavid Brownell mutex_unlock(&lock); 20468ae12a0dSDavid Brownell else 20478ae12a0dSDavid Brownell kfree(local_buf); 20488ae12a0dSDavid Brownell 20498ae12a0dSDavid Brownell return status; 20508ae12a0dSDavid Brownell } 20518ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 20528ae12a0dSDavid Brownell 20538ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 20548ae12a0dSDavid Brownell 20558ae12a0dSDavid Brownell static int __init spi_init(void) 20568ae12a0dSDavid Brownell { 
2057b885244eSDavid Brownell int status; 20588ae12a0dSDavid Brownell 2059e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 2060b885244eSDavid Brownell if (!buf) { 2061b885244eSDavid Brownell status = -ENOMEM; 2062b885244eSDavid Brownell goto err0; 20638ae12a0dSDavid Brownell } 2064b885244eSDavid Brownell 2065b885244eSDavid Brownell status = bus_register(&spi_bus_type); 2066b885244eSDavid Brownell if (status < 0) 2067b885244eSDavid Brownell goto err1; 2068b885244eSDavid Brownell 2069b885244eSDavid Brownell status = class_register(&spi_master_class); 2070b885244eSDavid Brownell if (status < 0) 2071b885244eSDavid Brownell goto err2; 2072b885244eSDavid Brownell return 0; 2073b885244eSDavid Brownell 2074b885244eSDavid Brownell err2: 2075b885244eSDavid Brownell bus_unregister(&spi_bus_type); 2076b885244eSDavid Brownell err1: 2077b885244eSDavid Brownell kfree(buf); 2078b885244eSDavid Brownell buf = NULL; 2079b885244eSDavid Brownell err0: 2080b885244eSDavid Brownell return status; 2081b885244eSDavid Brownell } 2082b885244eSDavid Brownell 20838ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 20848ae12a0dSDavid Brownell * but even essential drivers wait till later 2085b885244eSDavid Brownell * 2086b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 2087b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 2088b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 20898ae12a0dSDavid Brownell */ 2090673c0c00SDavid Brownell postcore_initcall(spi_init); 20918ae12a0dSDavid Brownell 2092