18ae12a0dSDavid Brownell /* 2ca632f55SGrant Likely * SPI init/core code 38ae12a0dSDavid Brownell * 48ae12a0dSDavid Brownell * Copyright (C) 2005 David Brownell 5d57a4282SGrant Likely * Copyright (C) 2008 Secret Lab Technologies Ltd. 68ae12a0dSDavid Brownell * 78ae12a0dSDavid Brownell * This program is free software; you can redistribute it and/or modify 88ae12a0dSDavid Brownell * it under the terms of the GNU General Public License as published by 98ae12a0dSDavid Brownell * the Free Software Foundation; either version 2 of the License, or 108ae12a0dSDavid Brownell * (at your option) any later version. 118ae12a0dSDavid Brownell * 128ae12a0dSDavid Brownell * This program is distributed in the hope that it will be useful, 138ae12a0dSDavid Brownell * but WITHOUT ANY WARRANTY; without even the implied warranty of 148ae12a0dSDavid Brownell * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 158ae12a0dSDavid Brownell * GNU General Public License for more details. 168ae12a0dSDavid Brownell * 178ae12a0dSDavid Brownell * You should have received a copy of the GNU General Public License 188ae12a0dSDavid Brownell * along with this program; if not, write to the Free Software 198ae12a0dSDavid Brownell * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
208ae12a0dSDavid Brownell */ 218ae12a0dSDavid Brownell 228ae12a0dSDavid Brownell #include <linux/kernel.h> 23d57a4282SGrant Likely #include <linux/kmod.h> 248ae12a0dSDavid Brownell #include <linux/device.h> 258ae12a0dSDavid Brownell #include <linux/init.h> 268ae12a0dSDavid Brownell #include <linux/cache.h> 2794040828SMatthias Kaehlcke #include <linux/mutex.h> 282b7a32f7SSinan Akman #include <linux/of_device.h> 29d57a4282SGrant Likely #include <linux/of_irq.h> 305a0e3ad6STejun Heo #include <linux/slab.h> 31e0626e38SAnton Vorontsov #include <linux/mod_devicetable.h> 328ae12a0dSDavid Brownell #include <linux/spi/spi.h> 3374317984SJean-Christophe PLAGNIOL-VILLARD #include <linux/of_gpio.h> 343ae22e8cSMark Brown #include <linux/pm_runtime.h> 35025ed130SPaul Gortmaker #include <linux/export.h> 368bd75c77SClark Williams #include <linux/sched/rt.h> 37ffbbdd21SLinus Walleij #include <linux/delay.h> 38ffbbdd21SLinus Walleij #include <linux/kthread.h> 3964bee4d2SMika Westerberg #include <linux/ioport.h> 4064bee4d2SMika Westerberg #include <linux/acpi.h> 418ae12a0dSDavid Brownell 428ae12a0dSDavid Brownell static void spidev_release(struct device *dev) 438ae12a0dSDavid Brownell { 440ffa0285SHans-Peter Nilsson struct spi_device *spi = to_spi_device(dev); 458ae12a0dSDavid Brownell 468ae12a0dSDavid Brownell /* spi masters may cleanup for released devices */ 478ae12a0dSDavid Brownell if (spi->master->cleanup) 488ae12a0dSDavid Brownell spi->master->cleanup(spi); 498ae12a0dSDavid Brownell 500c868461SDavid Brownell spi_master_put(spi->master); 5107a389feSRoman Tereshonkov kfree(spi); 528ae12a0dSDavid Brownell } 538ae12a0dSDavid Brownell 548ae12a0dSDavid Brownell static ssize_t 558ae12a0dSDavid Brownell modalias_show(struct device *dev, struct device_attribute *a, char *buf) 568ae12a0dSDavid Brownell { 578ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 588ae12a0dSDavid Brownell 59d8e328b3SGrant Likely return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, 
spi->modalias); 608ae12a0dSDavid Brownell } 618ae12a0dSDavid Brownell 628ae12a0dSDavid Brownell static struct device_attribute spi_dev_attrs[] = { 638ae12a0dSDavid Brownell __ATTR_RO(modalias), 648ae12a0dSDavid Brownell __ATTR_NULL, 658ae12a0dSDavid Brownell }; 668ae12a0dSDavid Brownell 678ae12a0dSDavid Brownell /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 688ae12a0dSDavid Brownell * and the sysfs version makes coldplug work too. 698ae12a0dSDavid Brownell */ 708ae12a0dSDavid Brownell 7175368bf6SAnton Vorontsov static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 7275368bf6SAnton Vorontsov const struct spi_device *sdev) 7375368bf6SAnton Vorontsov { 7475368bf6SAnton Vorontsov while (id->name[0]) { 7575368bf6SAnton Vorontsov if (!strcmp(sdev->modalias, id->name)) 7675368bf6SAnton Vorontsov return id; 7775368bf6SAnton Vorontsov id++; 7875368bf6SAnton Vorontsov } 7975368bf6SAnton Vorontsov return NULL; 8075368bf6SAnton Vorontsov } 8175368bf6SAnton Vorontsov 8275368bf6SAnton Vorontsov const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 8375368bf6SAnton Vorontsov { 8475368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 8575368bf6SAnton Vorontsov 8675368bf6SAnton Vorontsov return spi_match_id(sdrv->id_table, sdev); 8775368bf6SAnton Vorontsov } 8875368bf6SAnton Vorontsov EXPORT_SYMBOL_GPL(spi_get_device_id); 8975368bf6SAnton Vorontsov 908ae12a0dSDavid Brownell static int spi_match_device(struct device *dev, struct device_driver *drv) 918ae12a0dSDavid Brownell { 928ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 9375368bf6SAnton Vorontsov const struct spi_driver *sdrv = to_spi_driver(drv); 9475368bf6SAnton Vorontsov 952b7a32f7SSinan Akman /* Attempt an OF style match */ 962b7a32f7SSinan Akman if (of_driver_match_device(dev, drv)) 972b7a32f7SSinan Akman return 1; 982b7a32f7SSinan Akman 9964bee4d2SMika Westerberg /* Then try ACPI */ 
10064bee4d2SMika Westerberg if (acpi_driver_match_device(dev, drv)) 10164bee4d2SMika Westerberg return 1; 10264bee4d2SMika Westerberg 10375368bf6SAnton Vorontsov if (sdrv->id_table) 10475368bf6SAnton Vorontsov return !!spi_match_id(sdrv->id_table, spi); 1058ae12a0dSDavid Brownell 10635f74fcaSKay Sievers return strcmp(spi->modalias, drv->name) == 0; 1078ae12a0dSDavid Brownell } 1088ae12a0dSDavid Brownell 1097eff2e7aSKay Sievers static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 1108ae12a0dSDavid Brownell { 1118ae12a0dSDavid Brownell const struct spi_device *spi = to_spi_device(dev); 1128ae12a0dSDavid Brownell 113e0626e38SAnton Vorontsov add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 1148ae12a0dSDavid Brownell return 0; 1158ae12a0dSDavid Brownell } 1168ae12a0dSDavid Brownell 1173ae22e8cSMark Brown #ifdef CONFIG_PM_SLEEP 1183ae22e8cSMark Brown static int spi_legacy_suspend(struct device *dev, pm_message_t message) 1198ae12a0dSDavid Brownell { 1203c72426fSDavid Brownell int value = 0; 121b885244eSDavid Brownell struct spi_driver *drv = to_spi_driver(dev->driver); 1228ae12a0dSDavid Brownell 1238ae12a0dSDavid Brownell /* suspend will stop irqs and dma; no more i/o */ 1243c72426fSDavid Brownell if (drv) { 1253c72426fSDavid Brownell if (drv->suspend) 126b885244eSDavid Brownell value = drv->suspend(to_spi_device(dev), message); 1273c72426fSDavid Brownell else 1283c72426fSDavid Brownell dev_dbg(dev, "... 
can't suspend\n"); 1293c72426fSDavid Brownell } 1308ae12a0dSDavid Brownell return value; 1318ae12a0dSDavid Brownell } 1328ae12a0dSDavid Brownell 1333ae22e8cSMark Brown static int spi_legacy_resume(struct device *dev) 1348ae12a0dSDavid Brownell { 1353c72426fSDavid Brownell int value = 0; 136b885244eSDavid Brownell struct spi_driver *drv = to_spi_driver(dev->driver); 1378ae12a0dSDavid Brownell 1388ae12a0dSDavid Brownell /* resume may restart the i/o queue */ 1393c72426fSDavid Brownell if (drv) { 1403c72426fSDavid Brownell if (drv->resume) 141b885244eSDavid Brownell value = drv->resume(to_spi_device(dev)); 1423c72426fSDavid Brownell else 1433c72426fSDavid Brownell dev_dbg(dev, "... can't resume\n"); 1443c72426fSDavid Brownell } 1458ae12a0dSDavid Brownell return value; 1468ae12a0dSDavid Brownell } 1478ae12a0dSDavid Brownell 1483ae22e8cSMark Brown static int spi_pm_suspend(struct device *dev) 1493ae22e8cSMark Brown { 1503ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 1513ae22e8cSMark Brown 1523ae22e8cSMark Brown if (pm) 1533ae22e8cSMark Brown return pm_generic_suspend(dev); 1543ae22e8cSMark Brown else 1553ae22e8cSMark Brown return spi_legacy_suspend(dev, PMSG_SUSPEND); 1563ae22e8cSMark Brown } 1573ae22e8cSMark Brown 1583ae22e8cSMark Brown static int spi_pm_resume(struct device *dev) 1593ae22e8cSMark Brown { 1603ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 1613ae22e8cSMark Brown 1623ae22e8cSMark Brown if (pm) 1633ae22e8cSMark Brown return pm_generic_resume(dev); 1643ae22e8cSMark Brown else 1653ae22e8cSMark Brown return spi_legacy_resume(dev); 1663ae22e8cSMark Brown } 1673ae22e8cSMark Brown 1683ae22e8cSMark Brown static int spi_pm_freeze(struct device *dev) 1693ae22e8cSMark Brown { 1703ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 1713ae22e8cSMark Brown 1723ae22e8cSMark Brown if (pm) 1733ae22e8cSMark Brown return pm_generic_freeze(dev); 1743ae22e8cSMark Brown else 1753ae22e8cSMark Brown return spi_legacy_suspend(dev, PMSG_FREEZE); 1763ae22e8cSMark Brown } 1773ae22e8cSMark Brown 1783ae22e8cSMark Brown static int spi_pm_thaw(struct device *dev) 1793ae22e8cSMark Brown { 1803ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 1813ae22e8cSMark Brown 1823ae22e8cSMark Brown if (pm) 1833ae22e8cSMark Brown return pm_generic_thaw(dev); 1843ae22e8cSMark Brown else 1853ae22e8cSMark Brown return spi_legacy_resume(dev); 1863ae22e8cSMark Brown } 1873ae22e8cSMark Brown 1883ae22e8cSMark Brown static int spi_pm_poweroff(struct device *dev) 1893ae22e8cSMark Brown { 1903ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 1913ae22e8cSMark Brown 1923ae22e8cSMark Brown if (pm) 1933ae22e8cSMark Brown return pm_generic_poweroff(dev); 1943ae22e8cSMark Brown else 1953ae22e8cSMark Brown return spi_legacy_suspend(dev, PMSG_HIBERNATE); 1963ae22e8cSMark Brown } 1973ae22e8cSMark Brown 1983ae22e8cSMark Brown static int spi_pm_restore(struct device *dev) 1993ae22e8cSMark Brown { 2003ae22e8cSMark Brown const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 2013ae22e8cSMark Brown 2023ae22e8cSMark Brown if (pm) 2033ae22e8cSMark Brown return pm_generic_restore(dev); 2043ae22e8cSMark Brown else 2053ae22e8cSMark Brown return spi_legacy_resume(dev); 2063ae22e8cSMark Brown } 2078ae12a0dSDavid Brownell #else 2083ae22e8cSMark Brown #define spi_pm_suspend NULL 2093ae22e8cSMark Brown #define spi_pm_resume NULL 2103ae22e8cSMark Brown #define spi_pm_freeze NULL 2113ae22e8cSMark Brown #define spi_pm_thaw NULL 2123ae22e8cSMark Brown #define spi_pm_poweroff NULL 2133ae22e8cSMark Brown #define spi_pm_restore NULL 2148ae12a0dSDavid Brownell #endif 2158ae12a0dSDavid Brownell 2163ae22e8cSMark Brown static const struct dev_pm_ops spi_pm = { 2173ae22e8cSMark Brown .suspend = spi_pm_suspend, 2183ae22e8cSMark Brown .resume = spi_pm_resume, 2193ae22e8cSMark Brown .freeze = spi_pm_freeze, 2203ae22e8cSMark Brown .thaw = spi_pm_thaw, 2213ae22e8cSMark Brown .poweroff = spi_pm_poweroff, 2223ae22e8cSMark Brown .restore = spi_pm_restore, 2233ae22e8cSMark Brown SET_RUNTIME_PM_OPS( 2243ae22e8cSMark Brown pm_generic_runtime_suspend, 2253ae22e8cSMark Brown pm_generic_runtime_resume, 22645f0a85cSRafael J. 
Wysocki NULL 2273ae22e8cSMark Brown ) 2283ae22e8cSMark Brown }; 2293ae22e8cSMark Brown 2308ae12a0dSDavid Brownell struct bus_type spi_bus_type = { 2318ae12a0dSDavid Brownell .name = "spi", 2328ae12a0dSDavid Brownell .dev_attrs = spi_dev_attrs, 2338ae12a0dSDavid Brownell .match = spi_match_device, 2348ae12a0dSDavid Brownell .uevent = spi_uevent, 2353ae22e8cSMark Brown .pm = &spi_pm, 2368ae12a0dSDavid Brownell }; 2378ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_bus_type); 2388ae12a0dSDavid Brownell 239b885244eSDavid Brownell 240b885244eSDavid Brownell static int spi_drv_probe(struct device *dev) 241b885244eSDavid Brownell { 242b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 243b885244eSDavid Brownell 244b885244eSDavid Brownell return sdrv->probe(to_spi_device(dev)); 245b885244eSDavid Brownell } 246b885244eSDavid Brownell 247b885244eSDavid Brownell static int spi_drv_remove(struct device *dev) 248b885244eSDavid Brownell { 249b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 250b885244eSDavid Brownell 251b885244eSDavid Brownell return sdrv->remove(to_spi_device(dev)); 252b885244eSDavid Brownell } 253b885244eSDavid Brownell 254b885244eSDavid Brownell static void spi_drv_shutdown(struct device *dev) 255b885244eSDavid Brownell { 256b885244eSDavid Brownell const struct spi_driver *sdrv = to_spi_driver(dev->driver); 257b885244eSDavid Brownell 258b885244eSDavid Brownell sdrv->shutdown(to_spi_device(dev)); 259b885244eSDavid Brownell } 260b885244eSDavid Brownell 26133e34dc6SDavid Brownell /** 26233e34dc6SDavid Brownell * spi_register_driver - register a SPI driver 26333e34dc6SDavid Brownell * @sdrv: the driver to register 26433e34dc6SDavid Brownell * Context: can sleep 26533e34dc6SDavid Brownell */ 266b885244eSDavid Brownell int spi_register_driver(struct spi_driver *sdrv) 267b885244eSDavid Brownell { 268b885244eSDavid Brownell sdrv->driver.bus = &spi_bus_type; 269b885244eSDavid Brownell if (sdrv->probe) 
270b885244eSDavid Brownell sdrv->driver.probe = spi_drv_probe; 271b885244eSDavid Brownell if (sdrv->remove) 272b885244eSDavid Brownell sdrv->driver.remove = spi_drv_remove; 273b885244eSDavid Brownell if (sdrv->shutdown) 274b885244eSDavid Brownell sdrv->driver.shutdown = spi_drv_shutdown; 275b885244eSDavid Brownell return driver_register(&sdrv->driver); 276b885244eSDavid Brownell } 277b885244eSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_driver); 278b885244eSDavid Brownell 2798ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 2808ae12a0dSDavid Brownell 2818ae12a0dSDavid Brownell /* SPI devices should normally not be created by SPI device drivers; that 2828ae12a0dSDavid Brownell * would make them board-specific. Similarly with SPI master drivers. 2838ae12a0dSDavid Brownell * Device registration normally goes into like arch/.../mach.../board-YYY.c 2848ae12a0dSDavid Brownell * with other readonly (flashable) information about mainboard devices. 
2858ae12a0dSDavid Brownell */ 2868ae12a0dSDavid Brownell 2878ae12a0dSDavid Brownell struct boardinfo { 2888ae12a0dSDavid Brownell struct list_head list; 2892b9603a0SFeng Tang struct spi_board_info board_info; 2908ae12a0dSDavid Brownell }; 2918ae12a0dSDavid Brownell 2928ae12a0dSDavid Brownell static LIST_HEAD(board_list); 2932b9603a0SFeng Tang static LIST_HEAD(spi_master_list); 2942b9603a0SFeng Tang 2952b9603a0SFeng Tang /* 2962b9603a0SFeng Tang * Used to protect add/del opertion for board_info list and 2972b9603a0SFeng Tang * spi_master list, and their matching process 2982b9603a0SFeng Tang */ 29994040828SMatthias Kaehlcke static DEFINE_MUTEX(board_lock); 3008ae12a0dSDavid Brownell 301dc87c98eSGrant Likely /** 302dc87c98eSGrant Likely * spi_alloc_device - Allocate a new SPI device 303dc87c98eSGrant Likely * @master: Controller to which device is connected 304dc87c98eSGrant Likely * Context: can sleep 305dc87c98eSGrant Likely * 306dc87c98eSGrant Likely * Allows a driver to allocate and initialize a spi_device without 307dc87c98eSGrant Likely * registering it immediately. This allows a driver to directly 308dc87c98eSGrant Likely * fill the spi_device with device parameters before calling 309dc87c98eSGrant Likely * spi_add_device() on it. 310dc87c98eSGrant Likely * 311dc87c98eSGrant Likely * Caller is responsible to call spi_add_device() on the returned 312dc87c98eSGrant Likely * spi_device structure to add it to the SPI master. If the caller 313dc87c98eSGrant Likely * needs to discard the spi_device without adding it, then it should 314dc87c98eSGrant Likely * call spi_dev_put() on it. 315dc87c98eSGrant Likely * 316dc87c98eSGrant Likely * Returns a pointer to the new device, or NULL. 
317dc87c98eSGrant Likely */ 318dc87c98eSGrant Likely struct spi_device *spi_alloc_device(struct spi_master *master) 319dc87c98eSGrant Likely { 320dc87c98eSGrant Likely struct spi_device *spi; 321dc87c98eSGrant Likely struct device *dev = master->dev.parent; 322dc87c98eSGrant Likely 323dc87c98eSGrant Likely if (!spi_master_get(master)) 324dc87c98eSGrant Likely return NULL; 325dc87c98eSGrant Likely 326dc87c98eSGrant Likely spi = kzalloc(sizeof *spi, GFP_KERNEL); 327dc87c98eSGrant Likely if (!spi) { 328dc87c98eSGrant Likely dev_err(dev, "cannot alloc spi_device\n"); 329dc87c98eSGrant Likely spi_master_put(master); 330dc87c98eSGrant Likely return NULL; 331dc87c98eSGrant Likely } 332dc87c98eSGrant Likely 333dc87c98eSGrant Likely spi->master = master; 334178db7d3SLaurent Pinchart spi->dev.parent = &master->dev; 335dc87c98eSGrant Likely spi->dev.bus = &spi_bus_type; 336dc87c98eSGrant Likely spi->dev.release = spidev_release; 337446411e1SAndreas Larsson spi->cs_gpio = -ENOENT; 338dc87c98eSGrant Likely device_initialize(&spi->dev); 339dc87c98eSGrant Likely return spi; 340dc87c98eSGrant Likely } 341dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_alloc_device); 342dc87c98eSGrant Likely 343dc87c98eSGrant Likely /** 344dc87c98eSGrant Likely * spi_add_device - Add spi_device allocated with spi_alloc_device 345dc87c98eSGrant Likely * @spi: spi_device to register 346dc87c98eSGrant Likely * 347dc87c98eSGrant Likely * Companion function to spi_alloc_device. Devices allocated with 348dc87c98eSGrant Likely * spi_alloc_device can be added onto the spi bus with this function. 
349dc87c98eSGrant Likely * 350e48880e0SDavid Brownell * Returns 0 on success; negative errno on failure 351dc87c98eSGrant Likely */ 352dc87c98eSGrant Likely int spi_add_device(struct spi_device *spi) 353dc87c98eSGrant Likely { 354e48880e0SDavid Brownell static DEFINE_MUTEX(spi_add_lock); 35574317984SJean-Christophe PLAGNIOL-VILLARD struct spi_master *master = spi->master; 35674317984SJean-Christophe PLAGNIOL-VILLARD struct device *dev = master->dev.parent; 3578ec130a0SRoman Tereshonkov struct device *d; 358dc87c98eSGrant Likely int status; 359dc87c98eSGrant Likely 360dc87c98eSGrant Likely /* Chipselects are numbered 0..max; validate. */ 36174317984SJean-Christophe PLAGNIOL-VILLARD if (spi->chip_select >= master->num_chipselect) { 362dc87c98eSGrant Likely dev_err(dev, "cs%d >= max %d\n", 363dc87c98eSGrant Likely spi->chip_select, 36474317984SJean-Christophe PLAGNIOL-VILLARD master->num_chipselect); 365dc87c98eSGrant Likely return -EINVAL; 366dc87c98eSGrant Likely } 367dc87c98eSGrant Likely 368dc87c98eSGrant Likely /* Set the bus ID string */ 36935f74fcaSKay Sievers dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 370dc87c98eSGrant Likely spi->chip_select); 371dc87c98eSGrant Likely 372e48880e0SDavid Brownell 373e48880e0SDavid Brownell /* We need to make sure there's no other device with this 374e48880e0SDavid Brownell * chipselect **BEFORE** we call setup(), else we'll trash 375e48880e0SDavid Brownell * its configuration. Lock against concurrent add() calls. 
376e48880e0SDavid Brownell */ 377e48880e0SDavid Brownell mutex_lock(&spi_add_lock); 378e48880e0SDavid Brownell 3798ec130a0SRoman Tereshonkov d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); 3808ec130a0SRoman Tereshonkov if (d != NULL) { 381e48880e0SDavid Brownell dev_err(dev, "chipselect %d already in use\n", 382e48880e0SDavid Brownell spi->chip_select); 3838ec130a0SRoman Tereshonkov put_device(d); 384e48880e0SDavid Brownell status = -EBUSY; 385e48880e0SDavid Brownell goto done; 386e48880e0SDavid Brownell } 387e48880e0SDavid Brownell 38874317984SJean-Christophe PLAGNIOL-VILLARD if (master->cs_gpios) 38974317984SJean-Christophe PLAGNIOL-VILLARD spi->cs_gpio = master->cs_gpios[spi->chip_select]; 39074317984SJean-Christophe PLAGNIOL-VILLARD 391e48880e0SDavid Brownell /* Drivers may modify this initial i/o setup, but will 392e48880e0SDavid Brownell * normally rely on the device being setup. Devices 393e48880e0SDavid Brownell * using SPI_CS_HIGH can't coexist well otherwise... 
394e48880e0SDavid Brownell */ 3957d077197SDavid Brownell status = spi_setup(spi); 396dc87c98eSGrant Likely if (status < 0) { 397eb288a1fSLinus Walleij dev_err(dev, "can't setup %s, status %d\n", 398eb288a1fSLinus Walleij dev_name(&spi->dev), status); 399e48880e0SDavid Brownell goto done; 400dc87c98eSGrant Likely } 401dc87c98eSGrant Likely 402e48880e0SDavid Brownell /* Device may be bound to an active driver when this returns */ 403dc87c98eSGrant Likely status = device_add(&spi->dev); 404e48880e0SDavid Brownell if (status < 0) 405eb288a1fSLinus Walleij dev_err(dev, "can't add %s, status %d\n", 406eb288a1fSLinus Walleij dev_name(&spi->dev), status); 407e48880e0SDavid Brownell else 40835f74fcaSKay Sievers dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 409e48880e0SDavid Brownell 410e48880e0SDavid Brownell done: 411e48880e0SDavid Brownell mutex_unlock(&spi_add_lock); 412e48880e0SDavid Brownell return status; 413dc87c98eSGrant Likely } 414dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_add_device); 4158ae12a0dSDavid Brownell 41633e34dc6SDavid Brownell /** 41733e34dc6SDavid Brownell * spi_new_device - instantiate one new SPI device 41833e34dc6SDavid Brownell * @master: Controller to which device is connected 41933e34dc6SDavid Brownell * @chip: Describes the SPI device 42033e34dc6SDavid Brownell * Context: can sleep 42133e34dc6SDavid Brownell * 42233e34dc6SDavid Brownell * On typical mainboards, this is purely internal; and it's not needed 4238ae12a0dSDavid Brownell * after board init creates the hard-wired devices. Some development 4248ae12a0dSDavid Brownell * platforms may not be able to use spi_register_board_info though, and 4258ae12a0dSDavid Brownell * this is exported so that for example a USB or parport based adapter 4268ae12a0dSDavid Brownell * driver could add devices (which it would learn about out-of-band). 427082c8cb4SDavid Brownell * 428082c8cb4SDavid Brownell * Returns the new device, or NULL. 
4298ae12a0dSDavid Brownell */ 430e9d5a461SAdrian Bunk struct spi_device *spi_new_device(struct spi_master *master, 431e9d5a461SAdrian Bunk struct spi_board_info *chip) 4328ae12a0dSDavid Brownell { 4338ae12a0dSDavid Brownell struct spi_device *proxy; 4348ae12a0dSDavid Brownell int status; 4358ae12a0dSDavid Brownell 436082c8cb4SDavid Brownell /* NOTE: caller did any chip->bus_num checks necessary. 437082c8cb4SDavid Brownell * 438082c8cb4SDavid Brownell * Also, unless we change the return value convention to use 439082c8cb4SDavid Brownell * error-or-pointer (not NULL-or-pointer), troubleshootability 440082c8cb4SDavid Brownell * suggests syslogged diagnostics are best here (ugh). 441082c8cb4SDavid Brownell */ 442082c8cb4SDavid Brownell 443dc87c98eSGrant Likely proxy = spi_alloc_device(master); 444dc87c98eSGrant Likely if (!proxy) 4458ae12a0dSDavid Brownell return NULL; 4468ae12a0dSDavid Brownell 447102eb975SGrant Likely WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 448102eb975SGrant Likely 4498ae12a0dSDavid Brownell proxy->chip_select = chip->chip_select; 4508ae12a0dSDavid Brownell proxy->max_speed_hz = chip->max_speed_hz; 451980a01c9SDavid Brownell proxy->mode = chip->mode; 4528ae12a0dSDavid Brownell proxy->irq = chip->irq; 453102eb975SGrant Likely strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 4548ae12a0dSDavid Brownell proxy->dev.platform_data = (void *) chip->platform_data; 4558ae12a0dSDavid Brownell proxy->controller_data = chip->controller_data; 4568ae12a0dSDavid Brownell proxy->controller_state = NULL; 4578ae12a0dSDavid Brownell 458dc87c98eSGrant Likely status = spi_add_device(proxy); 4598ae12a0dSDavid Brownell if (status < 0) { 460dc87c98eSGrant Likely spi_dev_put(proxy); 4618ae12a0dSDavid Brownell return NULL; 4628ae12a0dSDavid Brownell } 463dc87c98eSGrant Likely 464dc87c98eSGrant Likely return proxy; 465dc87c98eSGrant Likely } 4668ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_new_device); 4678ae12a0dSDavid Brownell 
4682b9603a0SFeng Tang static void spi_match_master_to_boardinfo(struct spi_master *master, 4692b9603a0SFeng Tang struct spi_board_info *bi) 4702b9603a0SFeng Tang { 4712b9603a0SFeng Tang struct spi_device *dev; 4722b9603a0SFeng Tang 4732b9603a0SFeng Tang if (master->bus_num != bi->bus_num) 4742b9603a0SFeng Tang return; 4752b9603a0SFeng Tang 4762b9603a0SFeng Tang dev = spi_new_device(master, bi); 4772b9603a0SFeng Tang if (!dev) 4782b9603a0SFeng Tang dev_err(master->dev.parent, "can't create new device for %s\n", 4792b9603a0SFeng Tang bi->modalias); 4802b9603a0SFeng Tang } 4812b9603a0SFeng Tang 48233e34dc6SDavid Brownell /** 48333e34dc6SDavid Brownell * spi_register_board_info - register SPI devices for a given board 48433e34dc6SDavid Brownell * @info: array of chip descriptors 48533e34dc6SDavid Brownell * @n: how many descriptors are provided 48633e34dc6SDavid Brownell * Context: can sleep 48733e34dc6SDavid Brownell * 4888ae12a0dSDavid Brownell * Board-specific early init code calls this (probably during arch_initcall) 4898ae12a0dSDavid Brownell * with segments of the SPI device table. Any device nodes are created later, 4908ae12a0dSDavid Brownell * after the relevant parent SPI controller (bus_num) is defined. We keep 4918ae12a0dSDavid Brownell * this table of devices forever, so that reloading a controller driver will 4928ae12a0dSDavid Brownell * not make Linux forget about these hard-wired devices. 4938ae12a0dSDavid Brownell * 4948ae12a0dSDavid Brownell * Other code can also call this, e.g. a particular add-on board might provide 4958ae12a0dSDavid Brownell * SPI devices through its expansion connector, so code initializing that board 4968ae12a0dSDavid Brownell * would naturally declare its SPI devices. 4978ae12a0dSDavid Brownell * 4988ae12a0dSDavid Brownell * The board info passed can safely be __initdata ... but be careful of 4998ae12a0dSDavid Brownell * any embedded pointers (platform_data, etc), they're copied as-is. 
5008ae12a0dSDavid Brownell */ 501fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n) 5028ae12a0dSDavid Brownell { 5038ae12a0dSDavid Brownell struct boardinfo *bi; 5042b9603a0SFeng Tang int i; 5058ae12a0dSDavid Brownell 5062b9603a0SFeng Tang bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 5078ae12a0dSDavid Brownell if (!bi) 5088ae12a0dSDavid Brownell return -ENOMEM; 5098ae12a0dSDavid Brownell 5102b9603a0SFeng Tang for (i = 0; i < n; i++, bi++, info++) { 5112b9603a0SFeng Tang struct spi_master *master; 5122b9603a0SFeng Tang 5132b9603a0SFeng Tang memcpy(&bi->board_info, info, sizeof(*info)); 51494040828SMatthias Kaehlcke mutex_lock(&board_lock); 5158ae12a0dSDavid Brownell list_add_tail(&bi->list, &board_list); 5162b9603a0SFeng Tang list_for_each_entry(master, &spi_master_list, list) 5172b9603a0SFeng Tang spi_match_master_to_boardinfo(master, &bi->board_info); 51894040828SMatthias Kaehlcke mutex_unlock(&board_lock); 5192b9603a0SFeng Tang } 5202b9603a0SFeng Tang 5218ae12a0dSDavid Brownell return 0; 5228ae12a0dSDavid Brownell } 5238ae12a0dSDavid Brownell 5248ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 5258ae12a0dSDavid Brownell 526ffbbdd21SLinus Walleij /** 527ffbbdd21SLinus Walleij * spi_pump_messages - kthread work function which processes spi message queue 528ffbbdd21SLinus Walleij * @work: pointer to kthread work struct contained in the master struct 529ffbbdd21SLinus Walleij * 530ffbbdd21SLinus Walleij * This function checks if there is any spi message in the queue that 531ffbbdd21SLinus Walleij * needs processing and if so call out to the driver to initialize hardware 532ffbbdd21SLinus Walleij * and transfer each message. 
533ffbbdd21SLinus Walleij * 534ffbbdd21SLinus Walleij */ 535ffbbdd21SLinus Walleij static void spi_pump_messages(struct kthread_work *work) 536ffbbdd21SLinus Walleij { 537ffbbdd21SLinus Walleij struct spi_master *master = 538ffbbdd21SLinus Walleij container_of(work, struct spi_master, pump_messages); 539ffbbdd21SLinus Walleij unsigned long flags; 540ffbbdd21SLinus Walleij bool was_busy = false; 541ffbbdd21SLinus Walleij int ret; 542ffbbdd21SLinus Walleij 543ffbbdd21SLinus Walleij /* Lock queue and check for queue work */ 544ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 545ffbbdd21SLinus Walleij if (list_empty(&master->queue) || !master->running) { 546b0b36b86SBryan Freed if (!master->busy) { 5479af4acc0SDan Carpenter spin_unlock_irqrestore(&master->queue_lock, flags); 548ffbbdd21SLinus Walleij return; 549ffbbdd21SLinus Walleij } 550ffbbdd21SLinus Walleij master->busy = false; 551ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 552b0b36b86SBryan Freed if (master->unprepare_transfer_hardware && 553b0b36b86SBryan Freed master->unprepare_transfer_hardware(master)) 554b0b36b86SBryan Freed dev_err(&master->dev, 555b0b36b86SBryan Freed "failed to unprepare transfer hardware\n"); 556*49834de2SMark Brown if (master->auto_runtime_pm) { 557*49834de2SMark Brown pm_runtime_mark_last_busy(master->dev.parent); 558*49834de2SMark Brown pm_runtime_put_autosuspend(master->dev.parent); 559*49834de2SMark Brown } 560ffbbdd21SLinus Walleij return; 561ffbbdd21SLinus Walleij } 562ffbbdd21SLinus Walleij 563ffbbdd21SLinus Walleij /* Make sure we are not already running a message */ 564ffbbdd21SLinus Walleij if (master->cur_msg) { 565ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 566ffbbdd21SLinus Walleij return; 567ffbbdd21SLinus Walleij } 568ffbbdd21SLinus Walleij /* Extract head of queue */ 569ffbbdd21SLinus Walleij master->cur_msg = 570ffbbdd21SLinus Walleij list_entry(master->queue.next, struct spi_message, 
queue); 571ffbbdd21SLinus Walleij 572ffbbdd21SLinus Walleij list_del_init(&master->cur_msg->queue); 573ffbbdd21SLinus Walleij if (master->busy) 574ffbbdd21SLinus Walleij was_busy = true; 575ffbbdd21SLinus Walleij else 576ffbbdd21SLinus Walleij master->busy = true; 577ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 578ffbbdd21SLinus Walleij 579*49834de2SMark Brown if (!was_busy && master->auto_runtime_pm) { 580*49834de2SMark Brown ret = pm_runtime_get_sync(master->dev.parent); 581*49834de2SMark Brown if (ret < 0) { 582*49834de2SMark Brown dev_err(&master->dev, "Failed to power device: %d\n", 583*49834de2SMark Brown ret); 584*49834de2SMark Brown return; 585*49834de2SMark Brown } 586*49834de2SMark Brown } 587*49834de2SMark Brown 5887dfd2bd7SShubhrajyoti D if (!was_busy && master->prepare_transfer_hardware) { 589ffbbdd21SLinus Walleij ret = master->prepare_transfer_hardware(master); 590ffbbdd21SLinus Walleij if (ret) { 591ffbbdd21SLinus Walleij dev_err(&master->dev, 592ffbbdd21SLinus Walleij "failed to prepare transfer hardware\n"); 593*49834de2SMark Brown 594*49834de2SMark Brown if (master->auto_runtime_pm) 595*49834de2SMark Brown pm_runtime_put(master->dev.parent); 596ffbbdd21SLinus Walleij return; 597ffbbdd21SLinus Walleij } 598ffbbdd21SLinus Walleij } 599ffbbdd21SLinus Walleij 600ffbbdd21SLinus Walleij ret = master->transfer_one_message(master, master->cur_msg); 601ffbbdd21SLinus Walleij if (ret) { 602ffbbdd21SLinus Walleij dev_err(&master->dev, 603ffbbdd21SLinus Walleij "failed to transfer one message from queue\n"); 604ffbbdd21SLinus Walleij return; 605ffbbdd21SLinus Walleij } 606ffbbdd21SLinus Walleij } 607ffbbdd21SLinus Walleij 608ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master) 609ffbbdd21SLinus Walleij { 610ffbbdd21SLinus Walleij struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 611ffbbdd21SLinus Walleij 612ffbbdd21SLinus Walleij INIT_LIST_HEAD(&master->queue); 613ffbbdd21SLinus Walleij 
spin_lock_init(&master->queue_lock); 614ffbbdd21SLinus Walleij 615ffbbdd21SLinus Walleij master->running = false; 616ffbbdd21SLinus Walleij master->busy = false; 617ffbbdd21SLinus Walleij 618ffbbdd21SLinus Walleij init_kthread_worker(&master->kworker); 619ffbbdd21SLinus Walleij master->kworker_task = kthread_run(kthread_worker_fn, 620f170168bSKees Cook &master->kworker, "%s", 621ffbbdd21SLinus Walleij dev_name(&master->dev)); 622ffbbdd21SLinus Walleij if (IS_ERR(master->kworker_task)) { 623ffbbdd21SLinus Walleij dev_err(&master->dev, "failed to create message pump task\n"); 624ffbbdd21SLinus Walleij return -ENOMEM; 625ffbbdd21SLinus Walleij } 626ffbbdd21SLinus Walleij init_kthread_work(&master->pump_messages, spi_pump_messages); 627ffbbdd21SLinus Walleij 628ffbbdd21SLinus Walleij /* 629ffbbdd21SLinus Walleij * Master config will indicate if this controller should run the 630ffbbdd21SLinus Walleij * message pump with high (realtime) priority to reduce the transfer 631ffbbdd21SLinus Walleij * latency on the bus by minimising the delay between a transfer 632ffbbdd21SLinus Walleij * request and the scheduling of the message pump thread. Without this 633ffbbdd21SLinus Walleij * setting the message pump thread will remain at default priority. 
634ffbbdd21SLinus Walleij */ 635ffbbdd21SLinus Walleij if (master->rt) { 636ffbbdd21SLinus Walleij dev_info(&master->dev, 637ffbbdd21SLinus Walleij "will run message pump with realtime priority\n"); 638ffbbdd21SLinus Walleij sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 639ffbbdd21SLinus Walleij } 640ffbbdd21SLinus Walleij 641ffbbdd21SLinus Walleij return 0; 642ffbbdd21SLinus Walleij } 643ffbbdd21SLinus Walleij 644ffbbdd21SLinus Walleij /** 645ffbbdd21SLinus Walleij * spi_get_next_queued_message() - called by driver to check for queued 646ffbbdd21SLinus Walleij * messages 647ffbbdd21SLinus Walleij * @master: the master to check for queued messages 648ffbbdd21SLinus Walleij * 649ffbbdd21SLinus Walleij * If there are more messages in the queue, the next message is returned from 650ffbbdd21SLinus Walleij * this call. 651ffbbdd21SLinus Walleij */ 652ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master) 653ffbbdd21SLinus Walleij { 654ffbbdd21SLinus Walleij struct spi_message *next; 655ffbbdd21SLinus Walleij unsigned long flags; 656ffbbdd21SLinus Walleij 657ffbbdd21SLinus Walleij /* get a pointer to the next message, if any */ 658ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 659ffbbdd21SLinus Walleij if (list_empty(&master->queue)) 660ffbbdd21SLinus Walleij next = NULL; 661ffbbdd21SLinus Walleij else 662ffbbdd21SLinus Walleij next = list_entry(master->queue.next, 663ffbbdd21SLinus Walleij struct spi_message, queue); 664ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 665ffbbdd21SLinus Walleij 666ffbbdd21SLinus Walleij return next; 667ffbbdd21SLinus Walleij } 668ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 669ffbbdd21SLinus Walleij 670ffbbdd21SLinus Walleij /** 671ffbbdd21SLinus Walleij * spi_finalize_current_message() - the current message is complete 672ffbbdd21SLinus Walleij * @master: the master to return the message to 673ffbbdd21SLinus 
Walleij * 674ffbbdd21SLinus Walleij * Called by the driver to notify the core that the message in the front of the 675ffbbdd21SLinus Walleij * queue is complete and can be removed from the queue. 676ffbbdd21SLinus Walleij */ 677ffbbdd21SLinus Walleij void spi_finalize_current_message(struct spi_master *master) 678ffbbdd21SLinus Walleij { 679ffbbdd21SLinus Walleij struct spi_message *mesg; 680ffbbdd21SLinus Walleij unsigned long flags; 681ffbbdd21SLinus Walleij 682ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 683ffbbdd21SLinus Walleij mesg = master->cur_msg; 684ffbbdd21SLinus Walleij master->cur_msg = NULL; 685ffbbdd21SLinus Walleij 686ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 687ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 688ffbbdd21SLinus Walleij 689ffbbdd21SLinus Walleij mesg->state = NULL; 690ffbbdd21SLinus Walleij if (mesg->complete) 691ffbbdd21SLinus Walleij mesg->complete(mesg->context); 692ffbbdd21SLinus Walleij } 693ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message); 694ffbbdd21SLinus Walleij 695ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master) 696ffbbdd21SLinus Walleij { 697ffbbdd21SLinus Walleij unsigned long flags; 698ffbbdd21SLinus Walleij 699ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 700ffbbdd21SLinus Walleij 701ffbbdd21SLinus Walleij if (master->running || master->busy) { 702ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 703ffbbdd21SLinus Walleij return -EBUSY; 704ffbbdd21SLinus Walleij } 705ffbbdd21SLinus Walleij 706ffbbdd21SLinus Walleij master->running = true; 707ffbbdd21SLinus Walleij master->cur_msg = NULL; 708ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 709ffbbdd21SLinus Walleij 710ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 711ffbbdd21SLinus Walleij 712ffbbdd21SLinus Walleij 
return 0; 713ffbbdd21SLinus Walleij } 714ffbbdd21SLinus Walleij 715ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master) 716ffbbdd21SLinus Walleij { 717ffbbdd21SLinus Walleij unsigned long flags; 718ffbbdd21SLinus Walleij unsigned limit = 500; 719ffbbdd21SLinus Walleij int ret = 0; 720ffbbdd21SLinus Walleij 721ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 722ffbbdd21SLinus Walleij 723ffbbdd21SLinus Walleij /* 724ffbbdd21SLinus Walleij * This is a bit lame, but is optimized for the common execution path. 725ffbbdd21SLinus Walleij * A wait_queue on the master->busy could be used, but then the common 726ffbbdd21SLinus Walleij * execution path (pump_messages) would be required to call wake_up or 727ffbbdd21SLinus Walleij * friends on every SPI message. Do this instead. 728ffbbdd21SLinus Walleij */ 729ffbbdd21SLinus Walleij while ((!list_empty(&master->queue) || master->busy) && limit--) { 730ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 731ffbbdd21SLinus Walleij msleep(10); 732ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 733ffbbdd21SLinus Walleij } 734ffbbdd21SLinus Walleij 735ffbbdd21SLinus Walleij if (!list_empty(&master->queue) || master->busy) 736ffbbdd21SLinus Walleij ret = -EBUSY; 737ffbbdd21SLinus Walleij else 738ffbbdd21SLinus Walleij master->running = false; 739ffbbdd21SLinus Walleij 740ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 741ffbbdd21SLinus Walleij 742ffbbdd21SLinus Walleij if (ret) { 743ffbbdd21SLinus Walleij dev_warn(&master->dev, 744ffbbdd21SLinus Walleij "could not stop message queue\n"); 745ffbbdd21SLinus Walleij return ret; 746ffbbdd21SLinus Walleij } 747ffbbdd21SLinus Walleij return ret; 748ffbbdd21SLinus Walleij } 749ffbbdd21SLinus Walleij 750ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master) 751ffbbdd21SLinus Walleij { 752ffbbdd21SLinus Walleij int ret; 753ffbbdd21SLinus Walleij 
754ffbbdd21SLinus Walleij ret = spi_stop_queue(master); 755ffbbdd21SLinus Walleij 756ffbbdd21SLinus Walleij /* 757ffbbdd21SLinus Walleij * flush_kthread_worker will block until all work is done. 758ffbbdd21SLinus Walleij * If the reason that stop_queue timed out is that the work will never 759ffbbdd21SLinus Walleij * finish, then it does no good to call flush/stop thread, so 760ffbbdd21SLinus Walleij * return anyway. 761ffbbdd21SLinus Walleij */ 762ffbbdd21SLinus Walleij if (ret) { 763ffbbdd21SLinus Walleij dev_err(&master->dev, "problem destroying queue\n"); 764ffbbdd21SLinus Walleij return ret; 765ffbbdd21SLinus Walleij } 766ffbbdd21SLinus Walleij 767ffbbdd21SLinus Walleij flush_kthread_worker(&master->kworker); 768ffbbdd21SLinus Walleij kthread_stop(master->kworker_task); 769ffbbdd21SLinus Walleij 770ffbbdd21SLinus Walleij return 0; 771ffbbdd21SLinus Walleij } 772ffbbdd21SLinus Walleij 773ffbbdd21SLinus Walleij /** 774ffbbdd21SLinus Walleij * spi_queued_transfer - transfer function for queued transfers 775ffbbdd21SLinus Walleij * @spi: spi device which is requesting transfer 776ffbbdd21SLinus Walleij * @msg: spi message which is to handled is queued to driver queue 777ffbbdd21SLinus Walleij */ 778ffbbdd21SLinus Walleij static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 779ffbbdd21SLinus Walleij { 780ffbbdd21SLinus Walleij struct spi_master *master = spi->master; 781ffbbdd21SLinus Walleij unsigned long flags; 782ffbbdd21SLinus Walleij 783ffbbdd21SLinus Walleij spin_lock_irqsave(&master->queue_lock, flags); 784ffbbdd21SLinus Walleij 785ffbbdd21SLinus Walleij if (!master->running) { 786ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 787ffbbdd21SLinus Walleij return -ESHUTDOWN; 788ffbbdd21SLinus Walleij } 789ffbbdd21SLinus Walleij msg->actual_length = 0; 790ffbbdd21SLinus Walleij msg->status = -EINPROGRESS; 791ffbbdd21SLinus Walleij 792ffbbdd21SLinus Walleij list_add_tail(&msg->queue, &master->queue); 
793ffbbdd21SLinus Walleij if (master->running && !master->busy) 794ffbbdd21SLinus Walleij queue_kthread_work(&master->kworker, &master->pump_messages); 795ffbbdd21SLinus Walleij 796ffbbdd21SLinus Walleij spin_unlock_irqrestore(&master->queue_lock, flags); 797ffbbdd21SLinus Walleij return 0; 798ffbbdd21SLinus Walleij } 799ffbbdd21SLinus Walleij 800ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master) 801ffbbdd21SLinus Walleij { 802ffbbdd21SLinus Walleij int ret; 803ffbbdd21SLinus Walleij 804ffbbdd21SLinus Walleij master->queued = true; 805ffbbdd21SLinus Walleij master->transfer = spi_queued_transfer; 806ffbbdd21SLinus Walleij 807ffbbdd21SLinus Walleij /* Initialize and start queue */ 808ffbbdd21SLinus Walleij ret = spi_init_queue(master); 809ffbbdd21SLinus Walleij if (ret) { 810ffbbdd21SLinus Walleij dev_err(&master->dev, "problem initializing queue\n"); 811ffbbdd21SLinus Walleij goto err_init_queue; 812ffbbdd21SLinus Walleij } 813ffbbdd21SLinus Walleij ret = spi_start_queue(master); 814ffbbdd21SLinus Walleij if (ret) { 815ffbbdd21SLinus Walleij dev_err(&master->dev, "problem starting queue\n"); 816ffbbdd21SLinus Walleij goto err_start_queue; 817ffbbdd21SLinus Walleij } 818ffbbdd21SLinus Walleij 819ffbbdd21SLinus Walleij return 0; 820ffbbdd21SLinus Walleij 821ffbbdd21SLinus Walleij err_start_queue: 822ffbbdd21SLinus Walleij err_init_queue: 823ffbbdd21SLinus Walleij spi_destroy_queue(master); 824ffbbdd21SLinus Walleij return ret; 825ffbbdd21SLinus Walleij } 826ffbbdd21SLinus Walleij 827ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/ 828ffbbdd21SLinus Walleij 8297cb94361SAndreas Larsson #if defined(CONFIG_OF) 830d57a4282SGrant Likely /** 831d57a4282SGrant Likely * of_register_spi_devices() - Register child devices onto the SPI bus 832d57a4282SGrant Likely * @master: Pointer to spi_master device 833d57a4282SGrant Likely * 834d57a4282SGrant Likely * Registers an spi_device for each 
child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	const __be32 *prop;
	char modalias[SPI_NAME_SIZE + 4];
	int rc;
	int len;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			/* spi is NULL here; assumes spi_dev_put(NULL) is a
			 * safe no-op — TODO confirm */
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		prop = of_get_property(nc, "reg", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'reg' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = be32_to_cpup(prop);

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;

		/* Device speed: mandatory property */
		prop = of_get_property(nc, "spi-max-frequency", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = be32_to_cpup(prop);

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device; request the driver module first so
		 * probing can succeed immediately */
		snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
			 spi->modalias);
		request_module(modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
/*
 * Resource-walk callback: copy chip select, speed and mode bits out of an
 * ACPI SpiSerialBus resource, or pick up the first interrupt resource as
 * the device IRQ if none was set yet.
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void
				       **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	/* Skip handles with no ACPI device or ones not physically present.
	 * Returning AE_OK (not an error) keeps the namespace walk going. */
	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_HANDLE_SET(&spi->dev, handle);
	spi->irq = -1;

	/* Fill chip select/speed/mode/IRQ from the device's _CRS resources */
	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	/* No usable SpiSerialBus resource found — not an SPI slave; skip */
	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

/* Enumerate SPI slaves below the controller's ACPI node, if it has one. */
static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

/* Class release callback: frees the spi_master allocated in
 * spi_alloc_master() once the last reference is dropped. */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	/* master struct and driver-private data share one allocation;
	 * devdata points just past the master struct */
	master = kzalloc(size + sizeof *master, GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
/* Parse the controller's "cs-gpios" DT property into master->cs_gpios. */
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max(nb, (int)master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if
 (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	/* Default all entries to "no GPIO chipselect" ... */
	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	/* ... then fill in the ones the DT actually declares */
	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* Prefer a DT "spi" alias for the bus number when none was set */
	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_unregister(&master->dev);
			goto done;
		}
	}

	/* Expose the master to board info registered before or after us */
	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static int __unregister(struct device
 *dev, void *null)
{
	/* device_for_each_child() callback: drop one child spi_device */
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	/* Unregister all child spi_devices before the master itself */
	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

/* Stop the message queue for suspend; 0 for non-queued masters. */
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

/* Restart the message queue on resume; 0 for non-queued masters. */
int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

/* class_find_device() match callback: compare by bus number. */
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.
This function 13207d077197SDavid Brownell * changes those settings, and must be called from a context that can sleep. 13217d077197SDavid Brownell * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 13227d077197SDavid Brownell * effect the next time the device is selected and data is transferred to 13237d077197SDavid Brownell * or from it. When this function returns, the spi device is deselected. 13247d077197SDavid Brownell * 13257d077197SDavid Brownell * Note that this call will fail if the protocol driver specifies an option 13267d077197SDavid Brownell * that the underlying controller or its driver does not support. For 13277d077197SDavid Brownell * example, not all hardware supports wire transfers using nine bit words, 13287d077197SDavid Brownell * LSB-first wire encoding, or active-high chipselects. 13297d077197SDavid Brownell */ 13307d077197SDavid Brownell int spi_setup(struct spi_device *spi) 13317d077197SDavid Brownell { 1332e7db06b5SDavid Brownell unsigned bad_bits; 1333caae070cSLaxman Dewangan int status = 0; 13347d077197SDavid Brownell 1335e7db06b5SDavid Brownell /* help drivers fail *cleanly* when they need options 1336e7db06b5SDavid Brownell * that aren't supported with their current master 1337e7db06b5SDavid Brownell */ 1338e7db06b5SDavid Brownell bad_bits = spi->mode & ~spi->master->mode_bits; 1339e7db06b5SDavid Brownell if (bad_bits) { 1340eb288a1fSLinus Walleij dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1341e7db06b5SDavid Brownell bad_bits); 1342e7db06b5SDavid Brownell return -EINVAL; 1343e7db06b5SDavid Brownell } 1344e7db06b5SDavid Brownell 13457d077197SDavid Brownell if (!spi->bits_per_word) 13467d077197SDavid Brownell spi->bits_per_word = 8; 13477d077197SDavid Brownell 1348caae070cSLaxman Dewangan if (spi->master->setup) 13497d077197SDavid Brownell status = spi->master->setup(spi); 13507d077197SDavid Brownell 13517d077197SDavid Brownell dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" 13527d077197SDavid Brownell "%u 
bits/w, %u Hz max --> %d\n", 13537d077197SDavid Brownell (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 13547d077197SDavid Brownell (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 13557d077197SDavid Brownell (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 13567d077197SDavid Brownell (spi->mode & SPI_3WIRE) ? "3wire, " : "", 13577d077197SDavid Brownell (spi->mode & SPI_LOOP) ? "loopback, " : "", 13587d077197SDavid Brownell spi->bits_per_word, spi->max_speed_hz, 13597d077197SDavid Brownell status); 13607d077197SDavid Brownell 13617d077197SDavid Brownell return status; 13627d077197SDavid Brownell } 13637d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup); 13647d077197SDavid Brownell 1365cf32b71eSErnst Schwab static int __spi_async(struct spi_device *spi, struct spi_message *message) 1366cf32b71eSErnst Schwab { 1367cf32b71eSErnst Schwab struct spi_master *master = spi->master; 1368e6811d1dSLaxman Dewangan struct spi_transfer *xfer; 1369cf32b71eSErnst Schwab 1370cf32b71eSErnst Schwab /* Half-duplex links include original MicroWire, and ones with 1371cf32b71eSErnst Schwab * only one data pin like SPI_3WIRE (switches direction) or where 1372cf32b71eSErnst Schwab * either MOSI or MISO is missing. They can also be caused by 1373cf32b71eSErnst Schwab * software limitations. 
1374cf32b71eSErnst Schwab */ 1375cf32b71eSErnst Schwab if ((master->flags & SPI_MASTER_HALF_DUPLEX) 1376cf32b71eSErnst Schwab || (spi->mode & SPI_3WIRE)) { 1377cf32b71eSErnst Schwab unsigned flags = master->flags; 1378cf32b71eSErnst Schwab 1379cf32b71eSErnst Schwab list_for_each_entry(xfer, &message->transfers, transfer_list) { 1380cf32b71eSErnst Schwab if (xfer->rx_buf && xfer->tx_buf) 1381cf32b71eSErnst Schwab return -EINVAL; 1382cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 1383cf32b71eSErnst Schwab return -EINVAL; 1384cf32b71eSErnst Schwab if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 1385cf32b71eSErnst Schwab return -EINVAL; 1386cf32b71eSErnst Schwab } 1387cf32b71eSErnst Schwab } 1388cf32b71eSErnst Schwab 1389e6811d1dSLaxman Dewangan /** 1390059b8ffeSLaxman Dewangan * Set transfer bits_per_word and max speed as spi device default if 1391059b8ffeSLaxman Dewangan * it is not set for this transfer. 1392e6811d1dSLaxman Dewangan */ 1393e6811d1dSLaxman Dewangan list_for_each_entry(xfer, &message->transfers, transfer_list) { 1394e6811d1dSLaxman Dewangan if (!xfer->bits_per_word) 1395e6811d1dSLaxman Dewangan xfer->bits_per_word = spi->bits_per_word; 1396059b8ffeSLaxman Dewangan if (!xfer->speed_hz) 1397059b8ffeSLaxman Dewangan xfer->speed_hz = spi->max_speed_hz; 1398543bb255SStephen Warren if (master->bits_per_word_mask) { 1399543bb255SStephen Warren /* Only 32 bits fit in the mask */ 1400543bb255SStephen Warren if (xfer->bits_per_word > 32) 1401543bb255SStephen Warren return -EINVAL; 1402543bb255SStephen Warren if (!(master->bits_per_word_mask & 1403543bb255SStephen Warren BIT(xfer->bits_per_word - 1))) 1404543bb255SStephen Warren return -EINVAL; 1405543bb255SStephen Warren } 1406e6811d1dSLaxman Dewangan } 1407e6811d1dSLaxman Dewangan 1408cf32b71eSErnst Schwab message->spi = spi; 1409cf32b71eSErnst Schwab message->status = -EINPROGRESS; 1410cf32b71eSErnst Schwab return master->transfer(spi, message); 1411cf32b71eSErnst Schwab } 
1412cf32b71eSErnst Schwab 1413568d0697SDavid Brownell /** 1414568d0697SDavid Brownell * spi_async - asynchronous SPI transfer 1415568d0697SDavid Brownell * @spi: device with which data will be exchanged 1416568d0697SDavid Brownell * @message: describes the data transfers, including completion callback 1417568d0697SDavid Brownell * Context: any (irqs may be blocked, etc) 1418568d0697SDavid Brownell * 1419568d0697SDavid Brownell * This call may be used in_irq and other contexts which can't sleep, 1420568d0697SDavid Brownell * as well as from task contexts which can sleep. 1421568d0697SDavid Brownell * 1422568d0697SDavid Brownell * The completion callback is invoked in a context which can't sleep. 1423568d0697SDavid Brownell * Before that invocation, the value of message->status is undefined. 1424568d0697SDavid Brownell * When the callback is issued, message->status holds either zero (to 1425568d0697SDavid Brownell * indicate complete success) or a negative error code. After that 1426568d0697SDavid Brownell * callback returns, the driver which issued the transfer request may 1427568d0697SDavid Brownell * deallocate the associated memory; it's no longer in use by any SPI 1428568d0697SDavid Brownell * core or controller driver code. 1429568d0697SDavid Brownell * 1430568d0697SDavid Brownell * Note that although all messages to a spi_device are handled in 1431568d0697SDavid Brownell * FIFO order, messages may go to different devices in other orders. 1432568d0697SDavid Brownell * Some device might be higher priority, or have various "hard" access 1433568d0697SDavid Brownell * time requirements, for example. 1434568d0697SDavid Brownell * 1435568d0697SDavid Brownell * On detection of any fault during the transfer, processing of 1436568d0697SDavid Brownell * the entire message is aborted, and the device is deselected. 
1437568d0697SDavid Brownell * Until returning from the associated message completion callback, 1438568d0697SDavid Brownell * no other spi_message queued to that device will be processed. 1439568d0697SDavid Brownell * (This rule applies equally to all the synchronous transfer calls, 1440568d0697SDavid Brownell * which are wrappers around this core asynchronous primitive.) 1441568d0697SDavid Brownell */ 1442568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message) 1443568d0697SDavid Brownell { 1444568d0697SDavid Brownell struct spi_master *master = spi->master; 1445cf32b71eSErnst Schwab int ret; 1446cf32b71eSErnst Schwab unsigned long flags; 1447568d0697SDavid Brownell 1448cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1449568d0697SDavid Brownell 1450cf32b71eSErnst Schwab if (master->bus_lock_flag) 1451cf32b71eSErnst Schwab ret = -EBUSY; 1452cf32b71eSErnst Schwab else 1453cf32b71eSErnst Schwab ret = __spi_async(spi, message); 1454568d0697SDavid Brownell 1455cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1456cf32b71eSErnst Schwab 1457cf32b71eSErnst Schwab return ret; 1458568d0697SDavid Brownell } 1459568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async); 1460568d0697SDavid Brownell 1461cf32b71eSErnst Schwab /** 1462cf32b71eSErnst Schwab * spi_async_locked - version of spi_async with exclusive bus usage 1463cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 1464cf32b71eSErnst Schwab * @message: describes the data transfers, including completion callback 1465cf32b71eSErnst Schwab * Context: any (irqs may be blocked, etc) 1466cf32b71eSErnst Schwab * 1467cf32b71eSErnst Schwab * This call may be used in_irq and other contexts which can't sleep, 1468cf32b71eSErnst Schwab * as well as from task contexts which can sleep. 1469cf32b71eSErnst Schwab * 1470cf32b71eSErnst Schwab * The completion callback is invoked in a context which can't sleep. 
1471cf32b71eSErnst Schwab * Before that invocation, the value of message->status is undefined. 1472cf32b71eSErnst Schwab * When the callback is issued, message->status holds either zero (to 1473cf32b71eSErnst Schwab * indicate complete success) or a negative error code. After that 1474cf32b71eSErnst Schwab * callback returns, the driver which issued the transfer request may 1475cf32b71eSErnst Schwab * deallocate the associated memory; it's no longer in use by any SPI 1476cf32b71eSErnst Schwab * core or controller driver code. 1477cf32b71eSErnst Schwab * 1478cf32b71eSErnst Schwab * Note that although all messages to a spi_device are handled in 1479cf32b71eSErnst Schwab * FIFO order, messages may go to different devices in other orders. 1480cf32b71eSErnst Schwab * Some device might be higher priority, or have various "hard" access 1481cf32b71eSErnst Schwab * time requirements, for example. 1482cf32b71eSErnst Schwab * 1483cf32b71eSErnst Schwab * On detection of any fault during the transfer, processing of 1484cf32b71eSErnst Schwab * the entire message is aborted, and the device is deselected. 1485cf32b71eSErnst Schwab * Until returning from the associated message completion callback, 1486cf32b71eSErnst Schwab * no other spi_message queued to that device will be processed. 1487cf32b71eSErnst Schwab * (This rule applies equally to all the synchronous transfer calls, 1488cf32b71eSErnst Schwab * which are wrappers around this core asynchronous primitive.) 
1489cf32b71eSErnst Schwab */ 1490cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message) 1491cf32b71eSErnst Schwab { 1492cf32b71eSErnst Schwab struct spi_master *master = spi->master; 1493cf32b71eSErnst Schwab int ret; 1494cf32b71eSErnst Schwab unsigned long flags; 1495cf32b71eSErnst Schwab 1496cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1497cf32b71eSErnst Schwab 1498cf32b71eSErnst Schwab ret = __spi_async(spi, message); 1499cf32b71eSErnst Schwab 1500cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1501cf32b71eSErnst Schwab 1502cf32b71eSErnst Schwab return ret; 1503cf32b71eSErnst Schwab 1504cf32b71eSErnst Schwab } 1505cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked); 1506cf32b71eSErnst Schwab 15077d077197SDavid Brownell 15087d077197SDavid Brownell /*-------------------------------------------------------------------------*/ 15097d077197SDavid Brownell 15107d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on 15117d077197SDavid Brownell * top of the core. Some other utility methods are defined as 15127d077197SDavid Brownell * inline functions. 
15137d077197SDavid Brownell */ 15147d077197SDavid Brownell 15155d870c8eSAndrew Morton static void spi_complete(void *arg) 15165d870c8eSAndrew Morton { 15175d870c8eSAndrew Morton complete(arg); 15185d870c8eSAndrew Morton } 15195d870c8eSAndrew Morton 1520cf32b71eSErnst Schwab static int __spi_sync(struct spi_device *spi, struct spi_message *message, 1521cf32b71eSErnst Schwab int bus_locked) 1522cf32b71eSErnst Schwab { 1523cf32b71eSErnst Schwab DECLARE_COMPLETION_ONSTACK(done); 1524cf32b71eSErnst Schwab int status; 1525cf32b71eSErnst Schwab struct spi_master *master = spi->master; 1526cf32b71eSErnst Schwab 1527cf32b71eSErnst Schwab message->complete = spi_complete; 1528cf32b71eSErnst Schwab message->context = &done; 1529cf32b71eSErnst Schwab 1530cf32b71eSErnst Schwab if (!bus_locked) 1531cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 1532cf32b71eSErnst Schwab 1533cf32b71eSErnst Schwab status = spi_async_locked(spi, message); 1534cf32b71eSErnst Schwab 1535cf32b71eSErnst Schwab if (!bus_locked) 1536cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 1537cf32b71eSErnst Schwab 1538cf32b71eSErnst Schwab if (status == 0) { 1539cf32b71eSErnst Schwab wait_for_completion(&done); 1540cf32b71eSErnst Schwab status = message->status; 1541cf32b71eSErnst Schwab } 1542cf32b71eSErnst Schwab message->context = NULL; 1543cf32b71eSErnst Schwab return status; 1544cf32b71eSErnst Schwab } 1545cf32b71eSErnst Schwab 15468ae12a0dSDavid Brownell /** 15478ae12a0dSDavid Brownell * spi_sync - blocking/synchronous SPI data transfers 15488ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 15498ae12a0dSDavid Brownell * @message: describes the data transfers 155033e34dc6SDavid Brownell * Context: can sleep 15518ae12a0dSDavid Brownell * 15528ae12a0dSDavid Brownell * This call may only be used from a context that may sleep. The sleep 15538ae12a0dSDavid Brownell * is non-interruptible, and has no timeout. 
Low-overhead controller 15548ae12a0dSDavid Brownell * drivers may DMA directly into and out of the message buffers. 15558ae12a0dSDavid Brownell * 15568ae12a0dSDavid Brownell * Note that the SPI device's chip select is active during the message, 15578ae12a0dSDavid Brownell * and then is normally disabled between messages. Drivers for some 15588ae12a0dSDavid Brownell * frequently-used devices may want to minimize costs of selecting a chip, 15598ae12a0dSDavid Brownell * by leaving it selected in anticipation that the next message will go 15608ae12a0dSDavid Brownell * to the same chip. (That may increase power usage.) 15618ae12a0dSDavid Brownell * 15620c868461SDavid Brownell * Also, the caller is guaranteeing that the memory associated with the 15630c868461SDavid Brownell * message will not be freed before this call returns. 15640c868461SDavid Brownell * 15659b938b74SMarc Pignat * It returns zero on success, else a negative error code. 15668ae12a0dSDavid Brownell */ 15678ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message) 15688ae12a0dSDavid Brownell { 1569cf32b71eSErnst Schwab return __spi_sync(spi, message, 0); 15708ae12a0dSDavid Brownell } 15718ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync); 15728ae12a0dSDavid Brownell 1573cf32b71eSErnst Schwab /** 1574cf32b71eSErnst Schwab * spi_sync_locked - version of spi_sync with exclusive bus usage 1575cf32b71eSErnst Schwab * @spi: device with which data will be exchanged 1576cf32b71eSErnst Schwab * @message: describes the data transfers 1577cf32b71eSErnst Schwab * Context: can sleep 1578cf32b71eSErnst Schwab * 1579cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 1580cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. Low-overhead controller 1581cf32b71eSErnst Schwab * drivers may DMA directly into and out of the message buffers. 
1582cf32b71eSErnst Schwab * 1583cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 158425985edcSLucas De Marchi * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 1585cf32b71eSErnst Schwab * be released by a spi_bus_unlock call when the exclusive access is over. 1586cf32b71eSErnst Schwab * 1587cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 1588cf32b71eSErnst Schwab */ 1589cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 1590cf32b71eSErnst Schwab { 1591cf32b71eSErnst Schwab return __spi_sync(spi, message, 1); 1592cf32b71eSErnst Schwab } 1593cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked); 1594cf32b71eSErnst Schwab 1595cf32b71eSErnst Schwab /** 1596cf32b71eSErnst Schwab * spi_bus_lock - obtain a lock for exclusive SPI bus usage 1597cf32b71eSErnst Schwab * @master: SPI bus master that should be locked for exclusive bus access 1598cf32b71eSErnst Schwab * Context: can sleep 1599cf32b71eSErnst Schwab * 1600cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 1601cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 1602cf32b71eSErnst Schwab * 1603cf32b71eSErnst Schwab * This call should be used by drivers that require exclusive access to the 1604cf32b71eSErnst Schwab * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 1605cf32b71eSErnst Schwab * exclusive access is over. Data transfer must be done by spi_sync_locked 1606cf32b71eSErnst Schwab * and spi_async_locked calls when the SPI bus lock is held. 1607cf32b71eSErnst Schwab * 1608cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 
1609cf32b71eSErnst Schwab */ 1610cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master) 1611cf32b71eSErnst Schwab { 1612cf32b71eSErnst Schwab unsigned long flags; 1613cf32b71eSErnst Schwab 1614cf32b71eSErnst Schwab mutex_lock(&master->bus_lock_mutex); 1615cf32b71eSErnst Schwab 1616cf32b71eSErnst Schwab spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1617cf32b71eSErnst Schwab master->bus_lock_flag = 1; 1618cf32b71eSErnst Schwab spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1619cf32b71eSErnst Schwab 1620cf32b71eSErnst Schwab /* mutex remains locked until spi_bus_unlock is called */ 1621cf32b71eSErnst Schwab 1622cf32b71eSErnst Schwab return 0; 1623cf32b71eSErnst Schwab } 1624cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock); 1625cf32b71eSErnst Schwab 1626cf32b71eSErnst Schwab /** 1627cf32b71eSErnst Schwab * spi_bus_unlock - release the lock for exclusive SPI bus usage 1628cf32b71eSErnst Schwab * @master: SPI bus master that was locked for exclusive bus access 1629cf32b71eSErnst Schwab * Context: can sleep 1630cf32b71eSErnst Schwab * 1631cf32b71eSErnst Schwab * This call may only be used from a context that may sleep. The sleep 1632cf32b71eSErnst Schwab * is non-interruptible, and has no timeout. 1633cf32b71eSErnst Schwab * 1634cf32b71eSErnst Schwab * This call releases an SPI bus lock previously obtained by an spi_bus_lock 1635cf32b71eSErnst Schwab * call. 1636cf32b71eSErnst Schwab * 1637cf32b71eSErnst Schwab * It returns zero on success, else a negative error code. 
1638cf32b71eSErnst Schwab */ 1639cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master) 1640cf32b71eSErnst Schwab { 1641cf32b71eSErnst Schwab master->bus_lock_flag = 0; 1642cf32b71eSErnst Schwab 1643cf32b71eSErnst Schwab mutex_unlock(&master->bus_lock_mutex); 1644cf32b71eSErnst Schwab 1645cf32b71eSErnst Schwab return 0; 1646cf32b71eSErnst Schwab } 1647cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock); 1648cf32b71eSErnst Schwab 1649a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */ 1650a9948b61SDavid Brownell #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) 16518ae12a0dSDavid Brownell 16528ae12a0dSDavid Brownell static u8 *buf; 16538ae12a0dSDavid Brownell 16548ae12a0dSDavid Brownell /** 16558ae12a0dSDavid Brownell * spi_write_then_read - SPI synchronous write followed by read 16568ae12a0dSDavid Brownell * @spi: device with which data will be exchanged 16578ae12a0dSDavid Brownell * @txbuf: data to be written (need not be dma-safe) 16588ae12a0dSDavid Brownell * @n_tx: size of txbuf, in bytes 165927570497SJiri Pirko * @rxbuf: buffer into which data will be read (need not be dma-safe) 166027570497SJiri Pirko * @n_rx: size of rxbuf, in bytes 166133e34dc6SDavid Brownell * Context: can sleep 16628ae12a0dSDavid Brownell * 16638ae12a0dSDavid Brownell * This performs a half duplex MicroWire style transaction with the 16648ae12a0dSDavid Brownell * device, sending txbuf and then reading rxbuf. The return value 16658ae12a0dSDavid Brownell * is zero for success, else a negative errno status code. 1666b885244eSDavid Brownell * This call may only be used from a context that may sleep. 16678ae12a0dSDavid Brownell * 16680c868461SDavid Brownell * Parameters to this routine are always copied using a small buffer; 166933e34dc6SDavid Brownell * portable code should never use this for more than 32 bytes. 
167033e34dc6SDavid Brownell * Performance-sensitive or bulk transfer code should instead use 16710c868461SDavid Brownell * spi_{async,sync}() calls with dma-safe buffers. 16728ae12a0dSDavid Brownell */ 16738ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi, 16740c4a1590SMark Brown const void *txbuf, unsigned n_tx, 16750c4a1590SMark Brown void *rxbuf, unsigned n_rx) 16768ae12a0dSDavid Brownell { 1677068f4070SDavid Brownell static DEFINE_MUTEX(lock); 16788ae12a0dSDavid Brownell 16798ae12a0dSDavid Brownell int status; 16808ae12a0dSDavid Brownell struct spi_message message; 1681bdff549eSDavid Brownell struct spi_transfer x[2]; 16828ae12a0dSDavid Brownell u8 *local_buf; 16838ae12a0dSDavid Brownell 1684b3a223eeSMark Brown /* Use preallocated DMA-safe buffer if we can. We can't avoid 1685b3a223eeSMark Brown * copying here, (as a pure convenience thing), but we can 1686b3a223eeSMark Brown * keep heap costs out of the hot path unless someone else is 1687b3a223eeSMark Brown * using the pre-allocated buffer or the transfer is too large. 
16888ae12a0dSDavid Brownell */ 1689b3a223eeSMark Brown if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 16902cd94c8aSMark Brown local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 16912cd94c8aSMark Brown GFP_KERNEL | GFP_DMA); 1692b3a223eeSMark Brown if (!local_buf) 1693b3a223eeSMark Brown return -ENOMEM; 1694b3a223eeSMark Brown } else { 1695b3a223eeSMark Brown local_buf = buf; 1696b3a223eeSMark Brown } 16978ae12a0dSDavid Brownell 16988275c642SVitaly Wool spi_message_init(&message); 1699bdff549eSDavid Brownell memset(x, 0, sizeof x); 1700bdff549eSDavid Brownell if (n_tx) { 1701bdff549eSDavid Brownell x[0].len = n_tx; 1702bdff549eSDavid Brownell spi_message_add_tail(&x[0], &message); 1703bdff549eSDavid Brownell } 1704bdff549eSDavid Brownell if (n_rx) { 1705bdff549eSDavid Brownell x[1].len = n_rx; 1706bdff549eSDavid Brownell spi_message_add_tail(&x[1], &message); 1707bdff549eSDavid Brownell } 17088275c642SVitaly Wool 17098ae12a0dSDavid Brownell memcpy(local_buf, txbuf, n_tx); 1710bdff549eSDavid Brownell x[0].tx_buf = local_buf; 1711bdff549eSDavid Brownell x[1].rx_buf = local_buf + n_tx; 17128ae12a0dSDavid Brownell 17138ae12a0dSDavid Brownell /* do the i/o */ 17148ae12a0dSDavid Brownell status = spi_sync(spi, &message); 17159b938b74SMarc Pignat if (status == 0) 1716bdff549eSDavid Brownell memcpy(rxbuf, x[1].rx_buf, n_rx); 17178ae12a0dSDavid Brownell 1718bdff549eSDavid Brownell if (x[0].tx_buf == buf) 1719068f4070SDavid Brownell mutex_unlock(&lock); 17208ae12a0dSDavid Brownell else 17218ae12a0dSDavid Brownell kfree(local_buf); 17228ae12a0dSDavid Brownell 17238ae12a0dSDavid Brownell return status; 17248ae12a0dSDavid Brownell } 17258ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read); 17268ae12a0dSDavid Brownell 17278ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/ 17288ae12a0dSDavid Brownell 17298ae12a0dSDavid Brownell static int __init spi_init(void) 17308ae12a0dSDavid Brownell { 
1731b885244eSDavid Brownell int status; 17328ae12a0dSDavid Brownell 1733e94b1766SChristoph Lameter buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 1734b885244eSDavid Brownell if (!buf) { 1735b885244eSDavid Brownell status = -ENOMEM; 1736b885244eSDavid Brownell goto err0; 17378ae12a0dSDavid Brownell } 1738b885244eSDavid Brownell 1739b885244eSDavid Brownell status = bus_register(&spi_bus_type); 1740b885244eSDavid Brownell if (status < 0) 1741b885244eSDavid Brownell goto err1; 1742b885244eSDavid Brownell 1743b885244eSDavid Brownell status = class_register(&spi_master_class); 1744b885244eSDavid Brownell if (status < 0) 1745b885244eSDavid Brownell goto err2; 1746b885244eSDavid Brownell return 0; 1747b885244eSDavid Brownell 1748b885244eSDavid Brownell err2: 1749b885244eSDavid Brownell bus_unregister(&spi_bus_type); 1750b885244eSDavid Brownell err1: 1751b885244eSDavid Brownell kfree(buf); 1752b885244eSDavid Brownell buf = NULL; 1753b885244eSDavid Brownell err0: 1754b885244eSDavid Brownell return status; 1755b885244eSDavid Brownell } 1756b885244eSDavid Brownell 17578ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(), 17588ae12a0dSDavid Brownell * but even essential drivers wait till later 1759b885244eSDavid Brownell * 1760b885244eSDavid Brownell * REVISIT only boardinfo really needs static linking. the rest (device and 1761b885244eSDavid Brownell * driver registration) _could_ be dynamically linked (modular) ... costs 1762b885244eSDavid Brownell * include needing to have boardinfo data structures be much more public. 17638ae12a0dSDavid Brownell */ 1764673c0c00SDavid Brownell postcore_initcall(spi_init); 17658ae12a0dSDavid Brownell 1766