/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);

        /* spi masters may cleanup for released devices */
        if (spi->master->cleanup)
                spi->master->cleanup(spi);

        spi_master_put(spi->master);
        kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
        const struct spi_device *spi = to_spi_device(dev);
        int len;

        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;

        return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_master_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
        struct spi_master *master = container_of(dev, \
                                                 struct spi_master, dev); \
        return spi_statistics_##field##_show(&master->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_master_##field = { \
        .attr = { .name = file, .mode = S_IRUGO }, \
        .show = spi_master_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
                                         struct device_attribute *attr, \
                                         char *buf) \
{ \
        struct spi_device *spi = to_spi_device(dev); \
        return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
        .attr = { .name = file, .mode = S_IRUGO }, \
        .show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
                                            char *buf) \
{ \
        unsigned long flags; \
        ssize_t len; \
        spin_lock_irqsave(&stat->lock, flags); \
        len = sprintf(buf, format_string, stat->field); \
        spin_unlock_irqrestore(&stat->lock, flags); \
        return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
        SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
                                 field, format_string)
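/*
 * Illustrative note (editorial addition, not part of the original file):
 * each SPI_STATISTICS_SHOW() instantiation below expands into a pair of
 * read-only sysfs attributes, one per master and one per device.  For
 * example, SPI_STATISTICS_SHOW(messages, "%lu") yields files such as
 *
 *      /sys/class/spi_master/spi0/statistics/messages
 *      /sys/bus/spi/devices/spi0.0/statistics/messages
 *
 * (the "spi0"/"spi0.0" names are hypothetical), which userspace can simply
 * read to sample the counters.
 */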
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
        SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
                                 "transfer_bytes_histo_" number, \
                                 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};

static const struct attribute_group spi_dev_group = {
        .attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_messages.attr,
        &dev_attr_spi_device_transfers.attr,
        &dev_attr_spi_device_errors.attr,
        &dev_attr_spi_device_timedout.attr,
        &dev_attr_spi_device_spi_sync.attr,
        &dev_attr_spi_device_spi_sync_immediate.attr,
        &dev_attr_spi_device_spi_async.attr,
        &dev_attr_spi_device_bytes.attr,
        &dev_attr_spi_device_bytes_rx.attr,
        &dev_attr_spi_device_bytes_tx.attr,
        &dev_attr_spi_device_transfer_bytes_histo0.attr,
        &dev_attr_spi_device_transfer_bytes_histo1.attr,
        &dev_attr_spi_device_transfer_bytes_histo2.attr,
        &dev_attr_spi_device_transfer_bytes_histo3.attr,
        &dev_attr_spi_device_transfer_bytes_histo4.attr,
        &dev_attr_spi_device_transfer_bytes_histo5.attr,
        &dev_attr_spi_device_transfer_bytes_histo6.attr,
        &dev_attr_spi_device_transfer_bytes_histo7.attr,
        &dev_attr_spi_device_transfer_bytes_histo8.attr,
        &dev_attr_spi_device_transfer_bytes_histo9.attr,
        &dev_attr_spi_device_transfer_bytes_histo10.attr,
        &dev_attr_spi_device_transfer_bytes_histo11.attr,
        &dev_attr_spi_device_transfer_bytes_histo12.attr,
        &dev_attr_spi_device_transfer_bytes_histo13.attr,
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
        NULL,
};

static const struct attribute_group spi_device_statistics_group = {
        .name = "statistics",
        .attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
        &spi_dev_group,
        &spi_device_statistics_group,
        NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_messages.attr,
        &dev_attr_spi_master_transfers.attr,
        &dev_attr_spi_master_errors.attr,
        &dev_attr_spi_master_timedout.attr,
        &dev_attr_spi_master_spi_sync.attr,
        &dev_attr_spi_master_spi_sync_immediate.attr,
        &dev_attr_spi_master_spi_async.attr,
        &dev_attr_spi_master_bytes.attr,
        &dev_attr_spi_master_bytes_rx.attr,
        &dev_attr_spi_master_bytes_tx.attr,
        &dev_attr_spi_master_transfer_bytes_histo0.attr,
        &dev_attr_spi_master_transfer_bytes_histo1.attr,
        &dev_attr_spi_master_transfer_bytes_histo2.attr,
        &dev_attr_spi_master_transfer_bytes_histo3.attr,
        &dev_attr_spi_master_transfer_bytes_histo4.attr,
        &dev_attr_spi_master_transfer_bytes_histo5.attr,
        &dev_attr_spi_master_transfer_bytes_histo6.attr,
        &dev_attr_spi_master_transfer_bytes_histo7.attr,
        &dev_attr_spi_master_transfer_bytes_histo8.attr,
        &dev_attr_spi_master_transfer_bytes_histo9.attr,
        &dev_attr_spi_master_transfer_bytes_histo10.attr,
        &dev_attr_spi_master_transfer_bytes_histo11.attr,
        &dev_attr_spi_master_transfer_bytes_histo12.attr,
        &dev_attr_spi_master_transfer_bytes_histo13.attr,
        &dev_attr_spi_master_transfer_bytes_histo14.attr,
        &dev_attr_spi_master_transfer_bytes_histo15.attr,
        &dev_attr_spi_master_transfer_bytes_histo16.attr,
        NULL,
};

static const struct attribute_group spi_master_statistics_group = {
        .name = "statistics",
        .attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
        &spi_master_statistics_group,
        NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
                                       struct spi_transfer *xfer,
                                       struct spi_master *master)
{
        unsigned long flags;
        int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

        if (l2len < 0)
                l2len = 0;

        spin_lock_irqsave(&stats->lock, flags);

        stats->transfers++;
        stats->transfer_bytes_histo[l2len]++;

        stats->bytes += xfer->len;
        if ((xfer->tx_buf) &&
            (xfer->tx_buf != master->dummy_tx))
                stats->bytes_tx += xfer->len;
        if ((xfer->rx_buf) &&
            (xfer->rx_buf != master->dummy_rx))
                stats->bytes_rx += xfer->len;

        spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
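/*
 * Worked example (editorial illustration): for a transfer with
 * xfer->len == 100 bytes, fls(100) == 7, so l2len == 6 and the transfer
 * is counted in the "transfer_bytes_histo_64-127" bucket.  A zero-length
 * transfer gives fls(0) == 0, which the l2len < 0 check clamps into the
 * first ("0-1") bucket.
 */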
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
                                                const struct spi_device *sdev)
{
        while (id->name[0]) {
                if (!strcmp(sdev->modalias, id->name))
                        return id;
                id++;
        }
        return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
        const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

        return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
        const struct spi_device *spi = to_spi_device(dev);
        const struct spi_driver *sdrv = to_spi_driver(drv);

        /* Attempt an OF style match */
        if (of_driver_match_device(dev, drv))
                return 1;

        /* Then try ACPI */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        if (sdrv->id_table)
                return !!spi_match_id(sdrv->id_table, spi);

        return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        const struct spi_device *spi = to_spi_device(dev);
        int rc;

        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;

        add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
        return 0;
}

struct bus_type spi_bus_type = {
        .name           = "spi",
        .dev_groups     = spi_dev_groups,
        .match          = spi_match_device,
        .uevent         = spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);
        struct spi_device *spi = to_spi_device(dev);
        int ret;

        ret = of_clk_set_defaults(dev->of_node, false);
        if (ret)
                return ret;

        if (dev->of_node) {
                spi->irq = of_irq_get(dev->of_node, 0);
                if (spi->irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                if (spi->irq < 0)
                        spi->irq = 0;
        }

        ret = dev_pm_domain_attach(dev, true);
        if (ret != -EPROBE_DEFER) {
                ret = sdrv->probe(spi);
                if (ret)
                        dev_pm_domain_detach(dev, true);
        }

        return ret;
}

static int spi_drv_remove(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);
        int ret;

        ret = sdrv->remove(to_spi_device(dev));
        dev_pm_domain_detach(dev, true);

        return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
        const struct spi_driver *sdrv = to_spi_driver(dev->driver);

        sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
        sdrv->driver.owner = owner;
        sdrv->driver.bus = &spi_bus_type;
        if (sdrv->probe)
                sdrv->driver.probe = spi_drv_probe;
        if (sdrv->remove)
                sdrv->driver.remove = spi_drv_remove;
        if (sdrv->shutdown)
                sdrv->driver.shutdown = spi_drv_shutdown;
        return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
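/*
 * Illustrative sketch (editorial addition, not from this file): the usual
 * shape of a protocol driver built on this interface.  The "acme-chip"
 * name and the callbacks are hypothetical; real drivers use the
 * spi_register_driver()/module_spi_driver() helpers from
 * <linux/spi/spi.h>, which funnel into __spi_register_driver() above.
 *
 *      static const struct spi_device_id acme_ids[] = {
 *              { "acme-chip", 0 },
 *              { }
 *      };
 *      MODULE_DEVICE_TABLE(spi, acme_ids);
 *
 *      static int acme_probe(struct spi_device *spi)
 *      {
 *              return 0;       // set up the chip here
 *      }
 *
 *      static struct spi_driver acme_driver = {
 *              .driver = {
 *                      .name = "acme-chip",
 *              },
 *              .id_table = acme_ids,
 *              .probe = acme_probe,
 *      };
 *      module_spi_driver(acme_driver);
 */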
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
        struct list_head        list;
        struct spi_board_info   board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
        struct spi_device *spi;

        if (!spi_master_get(master))
                return NULL;

        spi = kzalloc(sizeof(*spi), GFP_KERNEL);
        if (!spi) {
                spi_master_put(master);
                return NULL;
        }

        spi->master = master;
        spi->dev.parent = &master->dev;
        spi->dev.bus = &spi_bus_type;
        spi->dev.release = spidev_release;
        spi->cs_gpio = -ENOENT;

        spin_lock_init(&spi->statistics.lock);

        device_initialize(&spi->dev);
        return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

        if (adev) {
                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
                return;
        }

        dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
                     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
        struct spi_device *spi = to_spi_device(dev);
        struct spi_device *new_spi = data;

        if (spi->master == new_spi->master &&
            spi->chip_select == new_spi->chip_select)
                return -EBUSY;
        return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
        static DEFINE_MUTEX(spi_add_lock);
        struct spi_master *master = spi->master;
        struct device *dev = master->dev.parent;
        int status;

        /* Chipselects are numbered 0..max; validate. */
        if (spi->chip_select >= master->num_chipselect) {
                dev_err(dev, "cs%d >= max %d\n",
                        spi->chip_select,
                        master->num_chipselect);
                return -EINVAL;
        }

        /* Set the bus ID string */
        spi_dev_set_name(spi);

        /* We need to make sure there's no other device with this
         * chipselect **BEFORE** we call setup(), else we'll trash
         * its configuration.  Lock against concurrent add() calls.
         */
        mutex_lock(&spi_add_lock);

        status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
        if (status) {
                dev_err(dev, "chipselect %d already in use\n",
                        spi->chip_select);
                goto done;
        }

        if (master->cs_gpios)
                spi->cs_gpio = master->cs_gpios[spi->chip_select];

        /* Drivers may modify this initial i/o setup, but will
         * normally rely on the device being setup.  Devices
         * using SPI_CS_HIGH can't coexist well otherwise...
         */
        status = spi_setup(spi);
        if (status < 0) {
                dev_err(dev, "can't setup %s, status %d\n",
                        dev_name(&spi->dev), status);
                goto done;
        }

        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
                dev_err(dev, "can't add %s, status %d\n",
                        dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
        mutex_unlock(&spi_add_lock);
        return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
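/*
 * Illustrative sketch (editorial addition): how an adapter driver might
 * use the allocate/add pair above.  The chip select, speed and modalias
 * values are hypothetical placeholders.
 *
 *      struct spi_device *spi = spi_alloc_device(master);
 *
 *      if (!spi)
 *              return -ENOMEM;
 *      spi->chip_select = 0;
 *      spi->max_speed_hz = 1000000;
 *      strlcpy(spi->modalias, "acme-chip", sizeof(spi->modalias));
 *      if (spi_add_device(spi))
 *              spi_dev_put(spi);       // discard without registering
 */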
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
                                  struct spi_board_info *chip)
{
        struct spi_device *proxy;
        int status;

        /* NOTE: caller did any chip->bus_num checks necessary.
         *
         * Also, unless we change the return value convention to use
         * error-or-pointer (not NULL-or-pointer), troubleshootability
         * suggests syslogged diagnostics are best here (ugh).
         */

        proxy = spi_alloc_device(master);
        if (!proxy)
                return NULL;

        WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

        proxy->chip_select = chip->chip_select;
        proxy->max_speed_hz = chip->max_speed_hz;
        proxy->mode = chip->mode;
        proxy->irq = chip->irq;
        strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
        proxy->dev.platform_data = (void *) chip->platform_data;
        proxy->controller_data = chip->controller_data;
        proxy->controller_state = NULL;

        status = spi_add_device(proxy);
        if (status < 0) {
                spi_dev_put(proxy);
                return NULL;
        }

        return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
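/*
 * Illustrative sketch (editorial addition): an out-of-band adapter driver
 * could instantiate a child with a locally built descriptor; the values
 * shown are hypothetical.
 *
 *      struct spi_board_info chip = {
 *              .modalias       = "acme-chip",
 *              .max_speed_hz   = 500000,
 *              .chip_select    = 1,
 *              .mode           = SPI_MODE_0,
 *      };
 *      struct spi_device *child = spi_new_device(master, &chip);
 *
 *      if (!child)
 *              dev_err(&master->dev, "could not create child device\n");
 */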
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
        if (!spi)
                return;

        if (spi->dev.of_node)
                of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
        device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
                                          struct spi_board_info *bi)
{
        struct spi_device *dev;

        if (master->bus_num != bi->bus_num)
                return;

        dev = spi_new_device(master, bi);
        if (!dev)
                dev_err(master->dev.parent, "can't create new device for %s\n",
                        bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
        struct boardinfo *bi;
        int i;

        if (!n)
                return -EINVAL;

        bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;

        for (i = 0; i < n; i++, bi++, info++) {
                struct spi_master *master;

                memcpy(&bi->board_info, info, sizeof(*info));
                mutex_lock(&board_lock);
                list_add_tail(&bi->list, &board_list);
                list_for_each_entry(master, &spi_master_list, list)
                        spi_match_master_to_boardinfo(master, &bi->board_info);
                mutex_unlock(&board_lock);
        }

        return 0;
}
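/*
 * Illustrative sketch (editorial addition): a board file would typically
 * declare its hard-wired devices like this during early init.  Names and
 * numbers are hypothetical.
 *
 *      static struct spi_board_info board_spi_devices[] __initdata = {
 *              {
 *                      .modalias       = "acme-chip",
 *                      .max_speed_hz   = 2000000,
 *                      .bus_num        = 0,
 *                      .chip_select    = 0,
 *                      .mode           = SPI_MODE_3,
 *              },
 *      };
 *
 *      spi_register_board_info(board_spi_devices,
 *                              ARRAY_SIZE(board_spi_devices));
 */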
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
        if (spi->mode & SPI_CS_HIGH)
                enable = !enable;

        if (gpio_is_valid(spi->cs_gpio))
                gpio_set_value(spi->cs_gpio, !enable);
        else if (spi->master->set_cs)
                spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
                       struct sg_table *sgt, void *buf, size_t len,
                       enum dma_data_direction dir)
{
        const bool vmalloced_buf = is_vmalloc_addr(buf);
        int desc_len;
        int sgs;
        struct page *vm_page;
        void *sg_buf;
        size_t min;
        int i, ret;

        if (vmalloced_buf) {
                desc_len = PAGE_SIZE;
                sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
        } else {
                desc_len = master->max_dma_len;
                sgs = DIV_ROUND_UP(len, desc_len);
        }

        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
        if (ret != 0)
                return ret;

        for (i = 0; i < sgs; i++) {

                if (vmalloced_buf) {
                        min = min_t(size_t,
                                    len, desc_len - offset_in_page(buf));
                        vm_page = vmalloc_to_page(buf);
                        if (!vm_page) {
                                sg_free_table(sgt);
                                return -ENOMEM;
                        }
                        sg_set_page(&sgt->sgl[i], vm_page,
                                    min, offset_in_page(buf));
                } else {
                        min = min_t(size_t, len, desc_len);
                        sg_buf = buf;
                        sg_set_buf(&sgt->sgl[i], sg_buf, min);
                }

                buf += min;
                len -= min;
        }

        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
        if (!ret)
                ret = -ENOMEM;
        if (ret < 0) {
                sg_free_table(sgt);
                return ret;
        }

        sgt->nents = ret;

        return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
                          struct sg_table *sgt, enum dma_data_direction dir)
{
        if (sgt->orig_nents) {
                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
                sg_free_table(sgt);
        }
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct device *tx_dev, *rx_dev;
        struct spi_transfer *xfer;
        int ret;

        if (!master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                if (xfer->tx_buf != NULL) {
                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
                                          (void *)xfer->tx_buf, xfer->len,
                                          DMA_TO_DEVICE);
                        if (ret != 0)
                                return ret;
                }

                if (xfer->rx_buf != NULL) {
                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
                                          xfer->rx_buf, xfer->len,
                                          DMA_FROM_DEVICE);
                        if (ret != 0) {
                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
                                              DMA_TO_DEVICE);
                                return ret;
                        }
                }
        }

        master->cur_msg_mapped = true;

        return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        struct device *tx_dev, *rx_dev;

        if (!master->cur_msg_mapped || !master->can_dma)
                return 0;

        if (master->dma_tx)
                tx_dev = master->dma_tx->device->dev;
        else
                tx_dev = &master->dev;

        if (master->dma_rx)
                rx_dev = master->dma_rx->device->dev;
        else
                rx_dev = &master->dev;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;

                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
        }

        return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
                                  struct spi_message *msg)
{
        return 0;
}
#endif /* !CONFIG_HAS_DMA */
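/*
 * Illustrative sketch (editorial addition): a controller driver opts into
 * the core's DMA mapping above by providing a can_dma() callback and its
 * dmaengine channels before registration.  The length threshold is a
 * hypothetical example.
 *
 *      static bool acme_can_dma(struct spi_master *master,
 *                               struct spi_device *spi,
 *                               struct spi_transfer *xfer)
 *      {
 *              return xfer->len > 32;  // DMA only for larger transfers
 *      }
 *
 *      master->can_dma = acme_can_dma;
 *      master->dma_tx = dma_request_slave_channel(dev, "tx");
 *      master->dma_rx = dma_request_slave_channel(dev, "rx");
 */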
static inline int spi_unmap_msg(struct spi_master *master,
                                struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /*
                 * Restore the original value of tx_buf or rx_buf if they are
                 * NULL.
                 */
                if (xfer->tx_buf == master->dummy_tx)
                        xfer->tx_buf = NULL;
                if (xfer->rx_buf == master->dummy_rx)
                        xfer->rx_buf = NULL;
        }

        return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
        struct spi_transfer *xfer;
        void *tmp;
        unsigned int max_tx, max_rx;

        if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                max_tx = 0;
                max_rx = 0;

                list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                        if ((master->flags & SPI_MASTER_MUST_TX) &&
                            !xfer->tx_buf)
                                max_tx = max(xfer->len, max_tx);
                        if ((master->flags & SPI_MASTER_MUST_RX) &&
                            !xfer->rx_buf)
                                max_rx = max(xfer->len, max_rx);
                }

                if (max_tx) {
                        tmp = krealloc(master->dummy_tx, max_tx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_tx = tmp;
                        memset(tmp, 0, max_tx);
                }

                if (max_rx) {
                        tmp = krealloc(master->dummy_rx, max_rx,
                                       GFP_KERNEL | GFP_DMA);
                        if (!tmp)
                                return -ENOMEM;
                        master->dummy_rx = tmp;
                }

                if (max_tx || max_rx) {
                        list_for_each_entry(xfer, &msg->transfers,
                                            transfer_list) {
                                if (!xfer->tx_buf)
                                        xfer->tx_buf = master->dummy_tx;
                                if (!xfer->rx_buf)
                                        xfer->rx_buf = master->dummy_rx;
                        }
                }
        }

        return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
        unsigned long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;

        spi_set_cs(msg->spi, true);

        SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
        SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                trace_spi_transfer_start(msg, xfer);

                spi_statistics_add_transfer_stats(statm, xfer, master);
                spi_statistics_add_transfer_stats(stats, xfer, master);

                if (xfer->tx_buf || xfer->rx_buf) {
                        reinit_completion(&master->xfer_completion);

                        ret = master->transfer_one(master, msg->spi, xfer);
                        if (ret < 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               errors);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               errors);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer failed: %d\n", ret);
                                goto out;
                        }

                        if (ret > 0) {
                                ret = 0;
                                ms = xfer->len * 8 * 1000 / xfer->speed_hz;
                                ms += ms + 100; /* some tolerance */

                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }

                        if (ms == 0) {
                                SPI_STATISTICS_INCREMENT_FIELD(statm,
                                                               timedout);
                                SPI_STATISTICS_INCREMENT_FIELD(stats,
                                                               timedout);
                                dev_err(&msg->spi->dev,
                                        "SPI transfer timed out\n");
                                msg->status = -ETIMEDOUT;
                        }
                } else {
                        if (xfer->len)
                                dev_err(&msg->spi->dev,
                                        "Bufferless transfer has length %u\n",
                                        xfer->len);
                }

                trace_spi_transfer_stop(msg, xfer);

                if (msg->status != -EINPROGRESS)
                        goto out;

                if (xfer->delay_usecs)
                        udelay(xfer->delay_usecs);

                if (xfer->cs_change) {
                        if (list_is_last(&xfer->transfer_list,
                                         &msg->transfers)) {
                                keep_cs = true;
                        } else {
                                spi_set_cs(msg->spi, false);
                                udelay(10);
                                spi_set_cs(msg->spi, true);
                        }
                }

                msg->actual_length += xfer->len;
        }

out:
        if (ret != 0 || !keep_cs)
                spi_set_cs(msg->spi, false);

        if (msg->status == -EINPROGRESS)
                msg->status = ret;

        if (msg->status && master->handle_err)
                master->handle_err(master, msg);

        spi_finalize_current_message(master);

        return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
        complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
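/*
 * Illustrative sketch (editorial addition): how a driver pairs a
 * transfer_one() that returns 1 ("still in flight") with an interrupt
 * handler that completes it via spi_finalize_current_transfer().  The
 * acme_* names and the acme_start_dma() helper are hypothetical.
 *
 *      static int acme_transfer_one(struct spi_master *master,
 *                                   struct spi_device *spi,
 *                                   struct spi_transfer *xfer)
 *      {
 *              acme_start_dma(master, xfer);   // kick off the transfer
 *              return 1;       // tell the core to wait for completion
 *      }
 *
 *      static irqreturn_t acme_irq(int irq, void *dev_id)
 *      {
 *              struct spi_master *master = dev_id;
 *
 *              spi_finalize_current_transfer(master);
 *              return IRQ_HANDLED;
 *      }
 */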
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
        unsigned long flags;
        bool was_busy = false;
        int ret;

        /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);

        /* Make sure we are not already running a message */
        if (master->cur_msg) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* If another context is idling the device then defer */
        if (master->idling) {
                queue_kthread_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }

                master->busy = false;
                master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);

                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
                master->dummy_tx = NULL;
                if (master->unprepare_transfer_hardware &&
                    master->unprepare_transfer_hardware(master))
                        dev_err(&master->dev,
                                "failed to unprepare transfer hardware\n");
                if (master->auto_runtime_pm) {
                        pm_runtime_mark_last_busy(master->dev.parent);
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);

                spin_lock_irqsave(&master->queue_lock, flags);
                master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }

        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);

        list_del_init(&master->cur_msg->queue);
        if (master->busy)
                was_busy = true;
        else
                master->busy = true;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (!was_busy && master->auto_runtime_pm) {
                ret = pm_runtime_get_sync(master->dev.parent);
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
                        return;
                }
        }

        if (!was_busy)
                trace_spi_master_busy(master);

        if (!was_busy && master->prepare_transfer_hardware) {
                ret = master->prepare_transfer_hardware(master);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare transfer hardware\n");

                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
                        return;
                }
        }

        trace_spi_message_start(master->cur_msg);

        if (master->prepare_message) {
                ret = master->prepare_message(master, master->cur_msg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
                        return;
                }
                master->cur_msg_prepared = true;
        }

        ret = spi_map_msg(master, master->cur_msg);
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
                return;
        }

        ret = master->transfer_one_message(master, master->cur_msg);
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
                return;
        }
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
        struct spi_master *master =
                container_of(work, struct spi_master, pump_messages);

        __spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

        master->running = false;
        master->busy = false;

        init_kthread_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
        if (IS_ERR(master->kworker_task)) {
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
        init_kthread_work(&master->pump_messages, spi_pump_messages);

        /*
         * Master config will indicate if this controller should run the
         * message pump with high (realtime) priority to reduce the transfer
         * latency on the bus by minimising the delay between a transfer
         * request and the scheduling of the message pump thread.  Without this
         * setting the message pump thread will remain at default priority.
         */
        if (master->rt) {
                dev_info(&master->dev,
                         "will run message pump with realtime priority\n");
                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
        }

        return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
        struct spi_message *next;
        unsigned long flags;

        /* get a pointer to the next message, if any */
        spin_lock_irqsave(&master->queue_lock, flags);
        next = list_first_entry_or_null(&master->queue, struct spi_message,
                                        queue);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
        struct spi_message *mesg;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&master->queue_lock, flags);
        mesg = master->cur_msg;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        spi_unmap_msg(master, mesg);

        if (master->cur_msg_prepared && master->unprepare_message) {
                ret = master->unprepare_message(master, mesg);
                if (ret) {
                        dev_err(&master->dev,
                                "failed to unprepare message: %d\n", ret);
                }
        }

        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
        queue_kthread_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);

        trace_spi_message_done(mesg);

        mesg->state = NULL;
        if (mesg->complete)
                mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (master->running || master->busy) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -EBUSY;
        }

        master->running = true;
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);

        queue_kthread_work(&master->kworker, &master->pump_messages);

        return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&master->queue_lock, flags);

        /*
         * This is a bit lame, but is optimized for the common execution path.
         * A wait_queue on the master->busy could be used, but then the common
         * execution path (pump_messages) would be required to call wake_up or
         * friends on every SPI message.  Do this instead.
         */
        while ((!list_empty(&master->queue) || master->busy) && limit--) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                usleep_range(10000, 11000);
                spin_lock_irqsave(&master->queue_lock, flags);
        }

        if (!list_empty(&master->queue) || master->busy)
                ret = -EBUSY;
        else
                master->running = false;

        spin_unlock_irqrestore(&master->queue_lock, flags);

        if (ret) {
                dev_warn(&master->dev,
                         "could not stop message queue\n");
                return ret;
        }
        return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
        int ret;

        ret = spi_stop_queue(master);

        /*
         * flush_kthread_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
         */
        if (ret) {
                dev_err(&master->dev, "problem destroying queue\n");
                return ret;
        }

        flush_kthread_worker(&master->kworker);
        kthread_stop(master->kworker_task);

        return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
                                 struct spi_message *msg,
                                 bool need_pump)
{
        struct spi_master *master = spi->master;
        unsigned long flags;

        spin_lock_irqsave(&master->queue_lock, flags);

        if (!master->running) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return -ESHUTDOWN;
        }
        msg->actual_length = 0;
        msg->status = -EINPROGRESS;

        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);

        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
        return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
        int ret;

        master->transfer = spi_queued_transfer;
        if (!master->transfer_one_message)
                master->transfer_one_message = spi_transfer_one_message;

        /* Initialize and start queue */
        ret = spi_init_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem initializing queue\n");
                goto err_init_queue;
        }
        master->queued = true;
        ret = spi_start_queue(master);
        if (ret) {
                dev_err(&master->dev, "problem starting queue\n");
                goto err_start_queue;
        }

        return 0;

err_start_queue:
        spi_destroy_queue(master);
err_init_queue:
        return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
        struct spi_device *spi;
        int rc;
        u32 value;

        /* Alloc an spi_device */
        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "spi_device alloc error for %s\n",
                        nc->full_name);
                rc = -ENOMEM;
                goto err_out;
        }

        /* Select device driver */
        rc = of_modalias_node(nc, spi->modalias,
                              sizeof(spi->modalias));
        if (rc < 0) {
                dev_err(&master->dev, "cannot find modalias for %s\n",
                        nc->full_name);
                goto err_out;
        }

        /* Device address */
        rc = of_property_read_u32(nc, "reg", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->chip_select = value;

        /* Mode (clock phase/polarity/etc.) */
        if (of_find_property(nc, "spi-cpha", NULL))
                spi->mode |= SPI_CPHA;
        if (of_find_property(nc, "spi-cpol", NULL))
                spi->mode |= SPI_CPOL;
        if (of_find_property(nc, "spi-cs-high", NULL))
                spi->mode |= SPI_CS_HIGH;
        if (of_find_property(nc, "spi-3wire", NULL))
                spi->mode |= SPI_3WIRE;
        if (of_find_property(nc, "spi-lsb-first", NULL))
                spi->mode |= SPI_LSB_FIRST;

        /* Device DUAL/QUAD mode */
        if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_TX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_TX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                 "spi-tx-bus-width %d not supported\n",
                                 value);
                        break;
                }
        }

        if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
                switch (value) {
                case 1:
                        break;
                case 2:
                        spi->mode |= SPI_RX_DUAL;
                        break;
                case 4:
                        spi->mode |= SPI_RX_QUAD;
                        break;
                default:
                        dev_warn(&master->dev,
                                 "spi-rx-bus-width %d not supported\n",
                                 value);
                        break;
                }
        }

        /* Device speed */
        rc = of_property_read_u32(nc, "spi-max-frequency", &value);
        if (rc) {
                dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
                        nc->full_name, rc);
                goto err_out;
        }
        spi->max_speed_hz = value;

        /* Store a pointer to the node in the device structure */
        of_node_get(nc);
        spi->dev.of_node = nc;

        /* Register the new device */
        rc = spi_add_device(spi);
        if (rc) {
                dev_err(&master->dev, "spi_device register error %s\n",
                        nc->full_name);
                goto err_out;
        }

        return spi;

err_out:
        spi_dev_put(spi);
        return ERR_PTR(rc);
}
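/*
 * Illustrative device tree fragment (editorial addition) matching the
 * properties parsed above; the node name, compatible string and values
 * are hypothetical.
 *
 *      &spi0 {
 *              flash@0 {
 *                      compatible = "acme,chip";
 *                      reg = <0>;              // chip select 0
 *                      spi-max-frequency = <10000000>;
 *                      spi-cpha;
 *                      spi-cpol;
 *              };
 *      };
 */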
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master: Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
        struct spi_device *spi;
        struct device_node *nc;

        if (!master->dev.of_node)
                return;

        for_each_available_child_of_node(master->dev.of_node, nc) {
                if (of_node_test_and_set_flag(nc, OF_POPULATED))
                        continue;
                spi = of_register_spi_device(master, nc);
                if (IS_ERR(spi))
                        dev_warn(&master->dev, "Failed to create SPI device for %s\n",
                                 nc->full_name);
        }
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
        struct spi_device *spi = data;

        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
                struct acpi_resource_spi_serialbus *sb;

                sb = &ares->data.spi_serial_bus;
                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
                        spi->chip_select = sb->device_selection;
                        spi->max_speed_hz = sb->connection_speed;

                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
                                spi->mode |= SPI_CPHA;
                        if (sb->clock_polarity == ACPI_SPI_START_HIGH)
                                spi->mode |= SPI_CPOL;
                        if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
                                spi->mode |= SPI_CS_HIGH;
                }
        } else if (spi->irq < 0) {
                struct resource r;

                if (acpi_dev_resource_interrupt(ares, 0, &r))
                        spi->irq = r.start;
        }

        /* Always tell the ACPI core to skip this resource */
        return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                                       void *data, void **return_value)
{
        struct spi_master *master = data;
        struct list_head resource_list;
        struct acpi_device *adev;
        struct spi_device *spi;
        int ret;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        spi = spi_alloc_device(master);
        if (!spi) {
                dev_err(&master->dev, "failed to allocate SPI device for %s\n",
                        dev_name(&adev->dev));
                return AE_NO_MEMORY;
        }

        ACPI_COMPANION_SET(&spi->dev, adev);
        spi->irq = -1;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list,
                                     acpi_spi_add_resource, spi);
        acpi_dev_free_resource_list(&resource_list);

        if (ret < 0 || !spi->max_speed_hz) {
                spi_dev_put(spi);
                return AE_OK;
        }

        if (spi->irq < 0)
                spi->irq = acpi_dev_gpio_irq_get(adev, 0);

        adev->power.flags.ignore_parent = true;
        strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
        if (spi_add_device(spi)) {
                adev->power.flags.ignore_parent = false;
                dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
                        dev_name(&adev->dev));
                spi_dev_put(spi);
        }

        return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
        acpi_status status;
        acpi_handle handle;

        handle = ACPI_HANDLE(master->dev.parent);
        if (!handle)
                return;

        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     acpi_spi_add_device, NULL,
                                     master, NULL);
        if (ACPI_FAILURE(status))
                dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
        struct spi_master *master;

        master = container_of(dev, struct spi_master, dev);
        kfree(master);
}

static struct class spi_master_class = {
        .name           = "spi_master",
        .owner          = THIS_MODULE,
        .dev_release    = spi_master_release,
        .dev_groups     = spi_master_groups,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *      memory is in the driver_data field of the returned device,
 *      accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
        struct spi_master *master;

        if (!dev)
                return NULL;

        master = kzalloc(size + sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        device_initialize(&master->dev);
        master->bus_num = -1;
        master->num_chipselect = 1;
        master->dev.class = &spi_master_class;
        master->dev.parent = dev;
        spi_master_set_devdata(master, &master[1]);

        return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
        int nb, i, *cs;
        struct device_node *np = master->dev.of_node;

        if (!np)
                return 0;

        nb = of_gpio_named_count(np, "cs-gpios");
        master->num_chipselect = max_t(int, nb, master->num_chipselect);

        /* Return error only for an incorrectly formed cs-gpios property */
        if (nb == 0 || nb == -ENOENT)
                return 0;
        else if (nb < 0)
                return nb;

        cs = devm_kzalloc(&master->dev,
                          sizeof(int) * master->num_chipselect,
                          GFP_KERNEL);
        master->cs_gpios = cs;

        if (!master->cs_gpios)
                return -ENOMEM;

        for (i = 0; i < master->num_chipselect; i++)
                cs[i] = -ENOENT;

        for (i = 0; i < nb; i++)
                cs[i] = of_get_named_gpio(np, "cs-gpios", i);

        return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
        return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
        static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
        struct device *dev = master->dev.parent;
        struct boardinfo *bi;
        int status = -ENODEV;
        int dynamic = 0;

        if (!dev)
                return -ENODEV;

        status = of_spi_register_master(master);
        if (status)
                return status;

        /* even if it's just one always-selected device, there must
         * be at least one chipselect
         */
        if (master->num_chipselect == 0)
                return -EINVAL;

        if ((master->bus_num < 0) && master->dev.of_node)
                master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

        /* convention: dynamically assigned bus IDs count down from the max */
        if (master->bus_num < 0) {
                /* FIXME switch to an IDR based scheme, something like
                 * I2C now uses, so we can't run out of "dynamic" IDs
                 */
                master->bus_num = atomic_dec_return(&dyn_bus_id);
                dynamic = 1;
        }

        INIT_LIST_HEAD(&master->queue);
        spin_lock_init(&master->queue_lock);
        spin_lock_init(&master->bus_lock_spinlock);
        mutex_init(&master->bus_lock_mutex);
        master->bus_lock_flag = 0;
        init_completion(&master->xfer_completion);
        if (!master->max_dma_len)
                master->max_dma_len = INT_MAX;

        /* register the device, then userspace will see it.
         * registration fails if the bus ID is in use.
         */
        dev_set_name(&master->dev, "spi%u", master->bus_num);
        status = device_add(&master->dev);
        if (status < 0)
                goto done;
        dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
                dynamic ? " (dynamic)" : "");

        /* If we're using a queued driver, start the queue */
        if (master->transfer)
                dev_info(dev, "master is unqueued, this is deprecated\n");
        else {
                status = spi_master_initialize_queue(master);
                if (status) {
                        device_del(&master->dev);
                        goto done;
                }
        }
        /* add statistics */
        spin_lock_init(&master->statistics.lock);

        mutex_lock(&board_lock);
        list_add_tail(&master->list, &spi_master_list);
        list_for_each_entry(bi, &board_list, list)
                spi_match_master_to_boardinfo(master, &bi->board_info);
        mutex_unlock(&board_lock);

        /* Register devices from the device tree and ACPI */
        of_register_spi_devices(master);
        acpi_register_spi_devices(master);
done:
        return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
        spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
        struct spi_master **ptr;
        int ret;

        ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = spi_register_master(master);
        if (!ret) {
                *ptr = master;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
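/*
 * Illustrative sketch (editorial addition): the usual shape of a
 * controller driver's probe() built on spi_alloc_master() and the managed
 * registration above.  The acme_* type and callback are hypothetical.
 *
 *      static int acme_spi_probe(struct platform_device *pdev)
 *      {
 *              struct spi_master *master;
 *              int ret;
 *
 *              master = spi_alloc_master(&pdev->dev,
 *                                        sizeof(struct acme_spi));
 *              if (!master)
 *                      return -ENOMEM;
 *
 *              master->num_chipselect = 4;
 *              master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *              master->transfer_one = acme_transfer_one;
 *              master->dev.of_node = pdev->dev.of_node;
 *
 *              ret = devm_spi_register_master(&pdev->dev, master);
 *              if (ret)
 *                      spi_master_put(master);
 *              return ret;
 *      }
 */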
static int __unregister(struct device *dev, void *null)
{
        spi_unregister_device(to_spi_device(dev));
        return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
        int dummy;

        if (master->queued) {
                if (spi_destroy_queue(master))
                        dev_err(&master->dev, "queue remove failed\n");
        }

        mutex_lock(&board_lock);
        list_del(&master->list);
        mutex_unlock(&board_lock);

        dummy = device_for_each_child(&master->dev, NULL, __unregister);
        device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
        int ret;

        /* Basically no-ops for non-queued masters */
        if (!master->queued)
                return 0;

        ret = spi_stop_queue(master);
        if (ret)
                dev_err(&master->dev, "queue stop failed\n");

        return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
        int ret;

        if (!master->queued)
                return 0;

        ret = spi_start_queue(master);
        if (ret)
                dev_err(&master->dev, "queue restart failed\n");

        return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
        struct spi_master *m;
        const u16 *bus_num = data;

        m = container_of(dev, struct spi_master, dev);
        return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
        struct device *dev;
        struct spi_master *master = NULL;

        dev = class_find_device(&spi_master_class, NULL, &bus_num,
                                __spi_master_match);
        if (dev)
                master = container_of(dev, struct spi_master, dev);
        /* reference got in class_find_device */
        return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
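/*
 * Illustrative sketch (editorial addition): a late-registered user looking
 * up bus 1; the caller owns the returned reference and must drop it with
 * spi_master_put() when done.
 *
 *      struct spi_master *master = spi_busnum_to_master(1);
 *
 *      if (master) {
 *              // ... use the master ...
 *              spi_master_put(master);
 *      }
 */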
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* Check mode to prevent DUAL and QUAD from being set at the
	 * same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: cannot select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden.
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
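/*
 * Example (editor's illustrative sketch): a protocol driver typically
 * adjusts the device settings once at probe() time and then calls
 * spi_setup().  The "bar_" name and the chosen values are hypothetical:
 *
 *	static int bar_configure(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 *
 * If the controller cannot honor one of these options (say, 16-bit words
 * are missing from bits_per_word_mask), spi_setup() fails cleanly with
 * -EINVAL instead of producing corrupt transfers later.
 */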
"loopback, " : "", 2115 spi->bits_per_word, spi->max_speed_hz, 2116 status); 2117 2118 return status; 2119 } 2120 EXPORT_SYMBOL_GPL(spi_setup); 2121 2122 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2123 { 2124 struct spi_master *master = spi->master; 2125 struct spi_transfer *xfer; 2126 int w_size; 2127 2128 if (list_empty(&message->transfers)) 2129 return -EINVAL; 2130 2131 /* Half-duplex links include original MicroWire, and ones with 2132 * only one data pin like SPI_3WIRE (switches direction) or where 2133 * either MOSI or MISO is missing. They can also be caused by 2134 * software limitations. 2135 */ 2136 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2137 || (spi->mode & SPI_3WIRE)) { 2138 unsigned flags = master->flags; 2139 2140 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2141 if (xfer->rx_buf && xfer->tx_buf) 2142 return -EINVAL; 2143 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2144 return -EINVAL; 2145 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2146 return -EINVAL; 2147 } 2148 } 2149 2150 /** 2151 * Set transfer bits_per_word and max speed as spi device default if 2152 * it is not set for this transfer. 2153 * Set transfer tx_nbits and rx_nbits as single transfer default 2154 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2155 */ 2156 message->frame_length = 0; 2157 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2158 message->frame_length += xfer->len; 2159 if (!xfer->bits_per_word) 2160 xfer->bits_per_word = spi->bits_per_word; 2161 2162 if (!xfer->speed_hz) 2163 xfer->speed_hz = spi->max_speed_hz; 2164 if (!xfer->speed_hz) 2165 xfer->speed_hz = master->max_speed_hz; 2166 2167 if (master->max_speed_hz && 2168 xfer->speed_hz > master->max_speed_hz) 2169 xfer->speed_hz = master->max_speed_hz; 2170 2171 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2172 return -EINVAL; 2173 2174 /* 2175 * SPI transfer length should be multiple of SPI word size 2176 * where SPI word size should be power-of-two multiple 2177 */ 2178 if (xfer->bits_per_word <= 8) 2179 w_size = 1; 2180 else if (xfer->bits_per_word <= 16) 2181 w_size = 2; 2182 else 2183 w_size = 4; 2184 2185 /* No partial transfers accepted */ 2186 if (xfer->len % w_size) 2187 return -EINVAL; 2188 2189 if (xfer->speed_hz && master->min_speed_hz && 2190 xfer->speed_hz < master->min_speed_hz) 2191 return -EINVAL; 2192 2193 if (xfer->tx_buf && !xfer->tx_nbits) 2194 xfer->tx_nbits = SPI_NBITS_SINGLE; 2195 if (xfer->rx_buf && !xfer->rx_nbits) 2196 xfer->rx_nbits = SPI_NBITS_SINGLE; 2197 /* check transfer tx/rx_nbits: 2198 * 1. check the value matches one of single, dual and quad 2199 * 2. 
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
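/*
 * Example (editor's illustrative sketch): a caller of spi_async() must
 * keep the message, transfers and buffers alive (and DMA-safe) until the
 * completion callback runs.  A common pattern is to embed them in a
 * dynamically allocated context; the "baz_" names are hypothetical, and
 * baz_handle_data() is assumed to be defined elsewhere:
 *
 *	struct baz_ctx {
 *		struct spi_message	msg;
 *		struct spi_transfer	xfer;
 *		u8			rx[4];
 *	};
 *
 *	static void baz_complete(void *context)
 *	{
 *		struct baz_ctx *ctx = context;
 *
 *		if (ctx->msg.status == 0)
 *			baz_handle_data(ctx->rx);
 *		kfree(ctx);
 *	}
 *
 *	static int baz_read_async(struct spi_device *spi)
 *	{
 *		struct baz_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		spi_message_init(&ctx->msg);
 *		ctx->xfer.rx_buf = ctx->rx;
 *		ctx->xfer.len = sizeof(ctx->rx);
 *		spi_message_add_tail(&ctx->xfer, &ctx->msg);
 *		ctx->msg.complete = baz_complete;
 *		ctx->msg.context = ctx;
 *		return spi_async(spi, &ctx->msg);
 *	}
 */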
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */
static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so special-case that.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
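/*
 * Example (editor's illustrative sketch): a simple synchronous register
 * write.  The "qux_" name is hypothetical, and for brevity the command
 * bytes live on the stack; a driver that must guarantee DMA-safe buffers
 * would use kmalloc'd memory instead:
 *
 *	static int qux_write_reg(struct spi_device *spi, u8 reg, u8 val)
 *	{
 *		u8 cmd[2] = { reg, val };
 *		struct spi_transfer xfer = {
 *			.tx_buf	= cmd,
 *			.len	= sizeof(cmd),
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init(&msg);
 *		spi_message_add_tail(&xfer, &msg);
 *		return spi_sync(spi, &msg);
 *	}
 */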
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
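/*
 * Example (editor's illustrative sketch): issuing two messages that no
 * other device on the bus may interleave with.  The "quux_" name is
 * hypothetical; m1 and m2 are assumed to be fully initialized
 * spi_messages:
 *
 *	static int quux_atomic_pair(struct spi_device *spi,
 *				    struct spi_message *m1,
 *				    struct spi_message *m2)
 *	{
 *		struct spi_master *master = spi->master;
 *		int ret;
 *
 *		spi_bus_lock(master);
 *		ret = spi_sync_locked(spi, m1);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, m2);
 *		spi_bus_unlock(master);
 *		return ret;
 *	}
 */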
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (this is a pure convenience API), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
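/*
 * Example (editor's illustrative sketch): spi_write_then_read() makes
 * short register accesses trivial, since the caller's buffers may live on
 * the stack.  The "corge_" name and the 0x80 "read" flag are hypothetical,
 * modeled on a common register-addressing convention:
 *
 *	static int corge_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd = reg | 0x80;
 *
 *		return spi_write_then_read(spi, &cmd, 1, val, 1);
 *	}
 */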
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters are not on the spi_bus, so we find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);