/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may clean up for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);

	return sprintf(buf, "%s\n", spi->modalias);
}

static struct device_attribute spi_dev_attrs[] = {
	__ATTR_RO(modalias),
	__ATTR_NULL,
};

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}
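
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * protocol driver that matches by id_table would provide entries like
 * the ones below.  Each name is compared against spi_device->modalias
 * by spi_match_id() above, and the same name reaches hotplug as
 * "spi:<name>" through the MODALIAS uevent emitted by spi_uevent().
 *
 *	static const struct spi_device_id example_spi_ids[] = {
 *		{ "example-chip",  0 },
 *		{ "example-chipb", 1 },	(driver-private data in field two)
 *		{ }			(terminator: name[0] == '\0')
 *	};
 *	MODULE_DEVICE_TABLE(spi, example_spi_ids);
 */
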
#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend	= spi_pm_suspend,
	.resume		= spi_pm_resume,
	.freeze		= spi_pm_freeze,
	.thaw		= spi_pm_thaw,
	.poweroff	= spi_pm_poweroff,
	.restore	= spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		pm_generic_runtime_idle
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_attrs	= spi_dev_attrs,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
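
/*
 * Illustrative sketch (hypothetical driver): the spi_pm_* helpers above
 * prefer a driver's dev_pm_ops and only fall back to the legacy
 * spi_driver suspend/resume hooks.  A driver opting into the modern
 * path might look like:
 *
 *	static SIMPLE_DEV_PM_OPS(example_pm_ops,
 *				 example_suspend, example_resume);
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name	= "example-chip",
 *			.owner	= THIS_MODULE,
 *			.pm	= &example_pm_ops,
 *		},
 *		...
 *	};
 *
 * With .driver.pm set, spi_pm_suspend() takes the pm_generic_suspend()
 * branch instead of spi_legacy_suspend().
 */
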
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	return sdrv->probe(to_spi_device(dev));
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	return sdrv->remove(to_spi_device(dev));
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
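
/*
 * Illustrative sketch (hypothetical names): a minimal protocol driver
 * registers itself like this, typically from its module init code:
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		... allocate per-chip state, talk to the chip ...
 *		return 0;
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		... release per-chip state ...
 *		return 0;
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name	= "example-chip",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *
 * ... followed by spi_register_driver(&example_driver).  Only the hooks
 * the driver fills in are wired up to the spi_drv_* adapters above.
 */
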
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;
	struct device		*dev = master->dev.parent;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof *spi, GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct device *dev = spi->master->dev.parent;
	struct device *d;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			spi->master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
			spi->chip_select);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
	if (d != NULL) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		put_device(d);
		status = -EBUSY;
		goto done;
	}

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
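
/*
 * Illustrative sketch (hypothetical adapter): a driver that learns about
 * attached chips at runtime, say a USB-to-SPI bridge, could instantiate
 * them with spi_new_device() instead of board init tables:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "example-chip",
 *		.max_speed_hz	= 1000000,
 *		.chip_select	= 0,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *dev = spi_new_device(master, &chip);
 *	if (!dev)
 *		... creation or setup failed; only syslog says why ...
 */
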
static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int __init
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
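
/*
 * Illustrative sketch (hypothetical board): a board file would declare
 * its hard-wired chips once, from arch_initcall-time code:
 *
 *	static struct spi_board_info ek_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.max_speed_hz	= 15 * 1000 * 1000,
 *		},
 *	};
 *
 * ... then call spi_register_board_info(ek_spi_devices,
 * ARRAY_SIZE(ek_spi_devices)).  The spi_device itself only appears once
 * the matching bus_num 0 controller registers.
 */
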
/*-------------------------------------------------------------------------*/

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof *master, GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	status = 0;

	/* Register devices from the device tree */
	of_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);


static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
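
/*
 * Illustrative sketch (hypothetical controller driver): the usual
 * pairing of the calls above, from a platform driver's probe():
 *
 *	struct spi_master *master;
 *	struct example_hw *hw;		(hypothetical driver-private state)
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*hw));
 *	if (!master)
 *		return -ENOMEM;
 *	hw = spi_master_get_devdata(master);
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA;
 *	master->setup = example_setup;
 *	master->transfer = example_transfer;
 *	status = spi_register_master(master);
 *	if (status < 0)
 *		spi_master_put(master);	(as the kernel-doc above requires)
 */
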
static int __spi_master_match(struct device *dev, void *data)
{
	struct spi_master *m;
	u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits;
	int		status;

	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
				"%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
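
/*
 * Illustrative sketch (hypothetical values): a protocol driver that
 * can't live with the defaults adjusts them and re-runs spi_setup(),
 * usually from its probe():
 *
 *	spi->mode = SPI_MODE_3;		(CPOL=1, CPHA=1)
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2 * 1000 * 1000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		... the master can't do this; fail probe() cleanly ...
 */
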
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		struct spi_transfer *xfer;
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	message->spi = spi;
	message->status = -EINPROGRESS;
	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
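
/*
 * Illustrative sketch (hypothetical callback and state): issuing a
 * message from atomic context and collecting the result later:
 *
 *	static void example_complete(void *context)
 *	{
 *		struct example_state *st = context;	(hypothetical)
 *		... runs in a context that can't sleep; message->status
 *		    now holds zero or a negative errno ...
 *	}
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = example_complete;
 *	msg.context = st;
 *	status = spi_async(spi, &msg);	(nonblocking; returns -EBUSY
 *					 while the bus is locked)
 */
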
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
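
/*
 * Illustrative sketch (hypothetical buffers): a full-duplex transfer
 * using the blocking call:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,		(dma-safe buffers)
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	status = spi_sync(spi, &msg);	(sleeps until the transfer is done)
 */
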
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
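
/*
 * Illustrative sketch: the intended lock/transfer/unlock sequence for a
 * driver needing several back-to-back messages with no interleaving
 * from other devices on the same bus:
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &msg1);	(only *_locked calls here)
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 */
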
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer.  We can't avoid copying here,
	 * (as a pure convenience thing), but we can keep heap costs
	 * out of the hot path ...
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ)
		return -EINVAL;

	spi_message_init(&message);
	memset(x, 0, sizeof x);
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* ... unless someone else is using the pre-allocated buffer */
	if (!mutex_trylock(&lock)) {
		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
		if (!local_buf)
			return -ENOMEM;
	} else
		local_buf = buf;

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);
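
/*
 * Illustrative sketch (hypothetical register layout): reading one byte
 * from register 0x42 of a chip with a "write command, then read reply"
 * protocol, using the convenience helper above.  Note the buffers live
 * on the stack; the helper copies through its own dma-safe buffer:
 *
 *	u8 cmd = 0x42, val;
 *	int status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (status == 0)
 *		... val now holds the register contents ...
 */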