1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/cache.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/dmaengine.h> 24 #include <linux/mutex.h> 25 #include <linux/of_device.h> 26 #include <linux/of_irq.h> 27 #include <linux/clk/clk-conf.h> 28 #include <linux/slab.h> 29 #include <linux/mod_devicetable.h> 30 #include <linux/spi/spi.h> 31 #include <linux/of_gpio.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/pm_domain.h> 34 #include <linux/property.h> 35 #include <linux/export.h> 36 #include <linux/sched/rt.h> 37 #include <uapi/linux/sched/types.h> 38 #include <linux/delay.h> 39 #include <linux/kthread.h> 40 #include <linux/ioport.h> 41 #include <linux/acpi.h> 42 #include <linux/highmem.h> 43 44 #define CREATE_TRACE_POINTS 45 #include <trace/events/spi.h> 46 47 static void spidev_release(struct device *dev) 48 { 49 struct spi_device *spi = to_spi_device(dev); 50 51 /* spi masters may cleanup for released devices */ 52 if (spi->master->cleanup) 53 spi->master->cleanup(spi); 54 55 spi_master_put(spi->master); 56 kfree(spi); 57 } 58 59 static ssize_t 60 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 61 { 62 const struct spi_device *spi = to_spi_device(dev); 63 int len; 64 65 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 66 if (len != -ENODEV) 67 return len; 68 69 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 70 } 71 static DEVICE_ATTR_RO(modalias); 72 73 #define SPI_STATISTICS_ATTRS(field, file) \ 74 static ssize_t spi_master_##field##_show(struct device *dev, \ 75 struct device_attribute *attr, \ 76 char *buf) \ 77 { \ 78 struct spi_master *master = container_of(dev, \ 79 struct spi_master, dev); \ 80 return spi_statistics_##field##_show(&master->statistics, buf); \ 81 } \ 82 static struct device_attribute dev_attr_spi_master_##field = { \ 83 .attr = { .name = file, .mode = S_IRUGO }, \ 84 .show = spi_master_##field##_show, \ 85 }; \ 86 static ssize_t spi_device_##field##_show(struct device *dev, \ 87 struct device_attribute *attr, \ 88 char *buf) \ 89 { \ 90 struct spi_device *spi = to_spi_device(dev); \ 91 return spi_statistics_##field##_show(&spi->statistics, buf); \ 92 } \ 93 static struct device_attribute dev_attr_spi_device_##field = { \ 94 .attr = { .name = file, .mode = S_IRUGO }, \ 95 .show = spi_device_##field##_show, \ 96 } 97 98 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 99 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 100 char *buf) \ 101 { \ 102 unsigned long flags; \ 103 ssize_t len; \ 104 spin_lock_irqsave(&stat->lock, flags); \ 105 len = sprintf(buf, format_string, stat->field); \ 106 spin_unlock_irqrestore(&stat->lock, flags); \ 107 return len; \ 108 } \ 109 SPI_STATISTICS_ATTRS(name, file) 110 111 #define SPI_STATISTICS_SHOW(field, format_string) \ 112 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 113 field, format_string) 114 115 SPI_STATISTICS_SHOW(messages, "%lu"); 116 SPI_STATISTICS_SHOW(transfers, "%lu"); 117 SPI_STATISTICS_SHOW(errors, "%lu"); 118 SPI_STATISTICS_SHOW(timedout, "%lu"); 119 120 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 121 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 122 SPI_STATISTICS_SHOW(spi_async, "%lu"); 123 124 SPI_STATISTICS_SHOW(bytes, "%llu"); 125 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 126 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 127 128 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 129 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 130 "transfer_bytes_histo_" number, \ 131 transfer_bytes_histo[index], "%lu") 132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 147 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 148 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 149 150 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 151 152 static struct attribute *spi_dev_attrs[] = { 153 &dev_attr_modalias.attr, 154 NULL, 155 }; 156 157 static const struct attribute_group spi_dev_group = { 158 .attrs = spi_dev_attrs, 159 }; 160 161 static struct attribute *spi_device_statistics_attrs[] = { 162 &dev_attr_spi_device_messages.attr, 163 &dev_attr_spi_device_transfers.attr, 164 &dev_attr_spi_device_errors.attr, 165 &dev_attr_spi_device_timedout.attr, 166 &dev_attr_spi_device_spi_sync.attr, 167 &dev_attr_spi_device_spi_sync_immediate.attr, 168 &dev_attr_spi_device_spi_async.attr, 169 &dev_attr_spi_device_bytes.attr, 170 &dev_attr_spi_device_bytes_rx.attr, 171 &dev_attr_spi_device_bytes_tx.attr, 172 &dev_attr_spi_device_transfer_bytes_histo0.attr, 173 &dev_attr_spi_device_transfer_bytes_histo1.attr, 174 &dev_attr_spi_device_transfer_bytes_histo2.attr, 175 &dev_attr_spi_device_transfer_bytes_histo3.attr, 176 &dev_attr_spi_device_transfer_bytes_histo4.attr, 177 &dev_attr_spi_device_transfer_bytes_histo5.attr, 178 &dev_attr_spi_device_transfer_bytes_histo6.attr, 179 &dev_attr_spi_device_transfer_bytes_histo7.attr, 180 &dev_attr_spi_device_transfer_bytes_histo8.attr, 181 &dev_attr_spi_device_transfer_bytes_histo9.attr, 182 &dev_attr_spi_device_transfer_bytes_histo10.attr, 183 &dev_attr_spi_device_transfer_bytes_histo11.attr, 184 &dev_attr_spi_device_transfer_bytes_histo12.attr, 185 &dev_attr_spi_device_transfer_bytes_histo13.attr, 186 &dev_attr_spi_device_transfer_bytes_histo14.attr, 187 &dev_attr_spi_device_transfer_bytes_histo15.attr, 188 &dev_attr_spi_device_transfer_bytes_histo16.attr, 189 &dev_attr_spi_device_transfers_split_maxsize.attr, 190 NULL, 191 }; 192 193 static const struct attribute_group spi_device_statistics_group = { 194 .name = "statistics", 195 .attrs = spi_device_statistics_attrs, 196 }; 197 198 static const struct attribute_group *spi_dev_groups[] = { 199 &spi_dev_group, 200 &spi_device_statistics_group, 201 NULL, 202 }; 203 204 static struct attribute *spi_master_statistics_attrs[] = { 205 &dev_attr_spi_master_messages.attr, 206 &dev_attr_spi_master_transfers.attr, 207 &dev_attr_spi_master_errors.attr, 208 &dev_attr_spi_master_timedout.attr, 209 &dev_attr_spi_master_spi_sync.attr, 210 &dev_attr_spi_master_spi_sync_immediate.attr, 211 &dev_attr_spi_master_spi_async.attr, 212 &dev_attr_spi_master_bytes.attr, 213 &dev_attr_spi_master_bytes_rx.attr, 214 &dev_attr_spi_master_bytes_tx.attr, 215 &dev_attr_spi_master_transfer_bytes_histo0.attr, 216 &dev_attr_spi_master_transfer_bytes_histo1.attr, 217 &dev_attr_spi_master_transfer_bytes_histo2.attr, 218 &dev_attr_spi_master_transfer_bytes_histo3.attr, 219 &dev_attr_spi_master_transfer_bytes_histo4.attr, 220 &dev_attr_spi_master_transfer_bytes_histo5.attr, 221 &dev_attr_spi_master_transfer_bytes_histo6.attr, 222 &dev_attr_spi_master_transfer_bytes_histo7.attr, 223 &dev_attr_spi_master_transfer_bytes_histo8.attr, 224 &dev_attr_spi_master_transfer_bytes_histo9.attr, 225 &dev_attr_spi_master_transfer_bytes_histo10.attr, 226 &dev_attr_spi_master_transfer_bytes_histo11.attr, 227 &dev_attr_spi_master_transfer_bytes_histo12.attr, 228 &dev_attr_spi_master_transfer_bytes_histo13.attr, 229 &dev_attr_spi_master_transfer_bytes_histo14.attr, 230 &dev_attr_spi_master_transfer_bytes_histo15.attr, 231 &dev_attr_spi_master_transfer_bytes_histo16.attr, 232 &dev_attr_spi_master_transfers_split_maxsize.attr, 233 NULL, 234 }; 235 236 static const struct attribute_group spi_master_statistics_group = { 237 .name = "statistics", 238 .attrs = spi_master_statistics_attrs, 239 }; 240 241 static const struct attribute_group *spi_master_groups[] = { 242 &spi_master_statistics_group, 243 NULL, 244 }; 245 246 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 247 struct spi_transfer *xfer, 248 struct spi_master *master) 249 { 250 unsigned long flags; 251 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 252 253 if (l2len < 0) 254 l2len = 0; 255 256 spin_lock_irqsave(&stats->lock, flags); 257 258 stats->transfers++; 259 stats->transfer_bytes_histo[l2len]++; 260 261 stats->bytes += xfer->len; 262 if ((xfer->tx_buf) && 263 (xfer->tx_buf != master->dummy_tx)) 264 stats->bytes_tx += xfer->len; 265 if ((xfer->rx_buf) && 266 (xfer->rx_buf != master->dummy_rx)) 267 stats->bytes_rx += xfer->len; 268 269 spin_unlock_irqrestore(&stats->lock, flags); 270 } 271 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 272 273 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 274 * and the sysfs version makes coldplug work too. 275 */ 276 277 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 278 const struct spi_device *sdev) 279 { 280 while (id->name[0]) { 281 if (!strcmp(sdev->modalias, id->name)) 282 return id; 283 id++; 284 } 285 return NULL; 286 } 287 288 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 289 { 290 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 291 292 return spi_match_id(sdrv->id_table, sdev); 293 } 294 EXPORT_SYMBOL_GPL(spi_get_device_id); 295 296 static int spi_match_device(struct device *dev, struct device_driver *drv) 297 { 298 const struct spi_device *spi = to_spi_device(dev); 299 const struct spi_driver *sdrv = to_spi_driver(drv); 300 301 /* Attempt an OF style match */ 302 if (of_driver_match_device(dev, drv)) 303 return 1; 304 305 /* Then try ACPI */ 306 if (acpi_driver_match_device(dev, drv)) 307 return 1; 308 309 if (sdrv->id_table) 310 return !!spi_match_id(sdrv->id_table, spi); 311 312 return strcmp(spi->modalias, drv->name) == 0; 313 } 314 315 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 316 { 317 const struct spi_device *spi = to_spi_device(dev); 318 int rc; 319 320 rc = acpi_device_uevent_modalias(dev, env); 321 if (rc != -ENODEV) 322 return rc; 323 324 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 325 return 0; 326 } 327 328 struct bus_type spi_bus_type = { 329 .name = "spi", 330 .dev_groups = spi_dev_groups, 331 .match = spi_match_device, 332 .uevent = spi_uevent, 333 }; 334 EXPORT_SYMBOL_GPL(spi_bus_type); 335 336 337 static int spi_drv_probe(struct device *dev) 338 { 339 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 340 struct spi_device *spi = to_spi_device(dev); 341 int ret; 342 343 ret = of_clk_set_defaults(dev->of_node, false); 344 if (ret) 345 return ret; 346 347 if (dev->of_node) { 348 spi->irq = of_irq_get(dev->of_node, 0); 349 if (spi->irq == -EPROBE_DEFER) 350 return -EPROBE_DEFER; 351 if (spi->irq < 0) 352 spi->irq = 0; 353 } 354 355 ret = dev_pm_domain_attach(dev, true); 356 if (ret != -EPROBE_DEFER) { 357 ret = sdrv->probe(spi); 358 if (ret) 359 dev_pm_domain_detach(dev, true); 360 } 361 362 return ret; 363 } 364 365 static int spi_drv_remove(struct device *dev) 366 { 367 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 368 int ret; 369 370 ret = sdrv->remove(to_spi_device(dev)); 371 dev_pm_domain_detach(dev, true); 372 373 return ret; 374 } 375 376 static void spi_drv_shutdown(struct device *dev) 377 { 378 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 379 380 sdrv->shutdown(to_spi_device(dev)); 381 } 382 383 /** 384 * __spi_register_driver - register a SPI driver 385 * @owner: owner module of the driver to register 386 * @sdrv: the driver to register 387 * Context: can sleep 388 * 389 * Return: zero on success, else a negative error code. 390 */ 391 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 392 { 393 sdrv->driver.owner = owner; 394 sdrv->driver.bus = &spi_bus_type; 395 if (sdrv->probe) 396 sdrv->driver.probe = spi_drv_probe; 397 if (sdrv->remove) 398 sdrv->driver.remove = spi_drv_remove; 399 if (sdrv->shutdown) 400 sdrv->driver.shutdown = spi_drv_shutdown; 401 return driver_register(&sdrv->driver); 402 } 403 EXPORT_SYMBOL_GPL(__spi_register_driver); 404 405 /*-------------------------------------------------------------------------*/ 406 407 /* SPI devices should normally not be created by SPI device drivers; that 408 * would make them board-specific. Similarly with SPI master drivers. 409 * Device registration normally goes into like arch/.../mach.../board-YYY.c 410 * with other readonly (flashable) information about mainboard devices. 411 */ 412 413 struct boardinfo { 414 struct list_head list; 415 struct spi_board_info board_info; 416 }; 417 418 static LIST_HEAD(board_list); 419 static LIST_HEAD(spi_master_list); 420 421 /* 422 * Used to protect add/del opertion for board_info list and 423 * spi_master list, and their matching process 424 */ 425 static DEFINE_MUTEX(board_lock); 426 427 /** 428 * spi_alloc_device - Allocate a new SPI device 429 * @master: Controller to which device is connected 430 * Context: can sleep 431 * 432 * Allows a driver to allocate and initialize a spi_device without 433 * registering it immediately. This allows a driver to directly 434 * fill the spi_device with device parameters before calling 435 * spi_add_device() on it. 436 * 437 * Caller is responsible to call spi_add_device() on the returned 438 * spi_device structure to add it to the SPI master. If the caller 439 * needs to discard the spi_device without adding it, then it should 440 * call spi_dev_put() on it. 441 * 442 * Return: a pointer to the new device, or NULL. 443 */ 444 struct spi_device *spi_alloc_device(struct spi_master *master) 445 { 446 struct spi_device *spi; 447 448 if (!spi_master_get(master)) 449 return NULL; 450 451 spi = kzalloc(sizeof(*spi), GFP_KERNEL); 452 if (!spi) { 453 spi_master_put(master); 454 return NULL; 455 } 456 457 spi->master = master; 458 spi->dev.parent = &master->dev; 459 spi->dev.bus = &spi_bus_type; 460 spi->dev.release = spidev_release; 461 spi->cs_gpio = -ENOENT; 462 463 spin_lock_init(&spi->statistics.lock); 464 465 device_initialize(&spi->dev); 466 return spi; 467 } 468 EXPORT_SYMBOL_GPL(spi_alloc_device); 469 470 static void spi_dev_set_name(struct spi_device *spi) 471 { 472 struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 473 474 if (adev) { 475 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 476 return; 477 } 478 479 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 480 spi->chip_select); 481 } 482 483 static int spi_dev_check(struct device *dev, void *data) 484 { 485 struct spi_device *spi = to_spi_device(dev); 486 struct spi_device *new_spi = data; 487 488 if (spi->master == new_spi->master && 489 spi->chip_select == new_spi->chip_select) 490 return -EBUSY; 491 return 0; 492 } 493 494 /** 495 * spi_add_device - Add spi_device allocated with spi_alloc_device 496 * @spi: spi_device to register 497 * 498 * Companion function to spi_alloc_device. Devices allocated with 499 * spi_alloc_device can be added onto the spi bus with this function. 500 * 501 * Return: 0 on success; negative errno on failure 502 */ 503 int spi_add_device(struct spi_device *spi) 504 { 505 static DEFINE_MUTEX(spi_add_lock); 506 struct spi_master *master = spi->master; 507 struct device *dev = master->dev.parent; 508 int status; 509 510 /* Chipselects are numbered 0..max; validate. */ 511 if (spi->chip_select >= master->num_chipselect) { 512 dev_err(dev, "cs%d >= max %d\n", 513 spi->chip_select, 514 master->num_chipselect); 515 return -EINVAL; 516 } 517 518 /* Set the bus ID string */ 519 spi_dev_set_name(spi); 520 521 /* We need to make sure there's no other device with this 522 * chipselect **BEFORE** we call setup(), else we'll trash 523 * its configuration. Lock against concurrent add() calls. 524 */ 525 mutex_lock(&spi_add_lock); 526 527 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 528 if (status) { 529 dev_err(dev, "chipselect %d already in use\n", 530 spi->chip_select); 531 goto done; 532 } 533 534 if (master->cs_gpios) 535 spi->cs_gpio = master->cs_gpios[spi->chip_select]; 536 537 /* Drivers may modify this initial i/o setup, but will 538 * normally rely on the device being setup. Devices 539 * using SPI_CS_HIGH can't coexist well otherwise... 540 */ 541 status = spi_setup(spi); 542 if (status < 0) { 543 dev_err(dev, "can't setup %s, status %d\n", 544 dev_name(&spi->dev), status); 545 goto done; 546 } 547 548 /* Device may be bound to an active driver when this returns */ 549 status = device_add(&spi->dev); 550 if (status < 0) 551 dev_err(dev, "can't add %s, status %d\n", 552 dev_name(&spi->dev), status); 553 else 554 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 555 556 done: 557 mutex_unlock(&spi_add_lock); 558 return status; 559 } 560 EXPORT_SYMBOL_GPL(spi_add_device); 561 562 /** 563 * spi_new_device - instantiate one new SPI device 564 * @master: Controller to which device is connected 565 * @chip: Describes the SPI device 566 * Context: can sleep 567 * 568 * On typical mainboards, this is purely internal; and it's not needed 569 * after board init creates the hard-wired devices. Some development 570 * platforms may not be able to use spi_register_board_info though, and 571 * this is exported so that for example a USB or parport based adapter 572 * driver could add devices (which it would learn about out-of-band). 573 * 574 * Return: the new device, or NULL. 575 */ 576 struct spi_device *spi_new_device(struct spi_master *master, 577 struct spi_board_info *chip) 578 { 579 struct spi_device *proxy; 580 int status; 581 582 /* NOTE: caller did any chip->bus_num checks necessary. 583 * 584 * Also, unless we change the return value convention to use 585 * error-or-pointer (not NULL-or-pointer), troubleshootability 586 * suggests syslogged diagnostics are best here (ugh). 587 */ 588 589 proxy = spi_alloc_device(master); 590 if (!proxy) 591 return NULL; 592 593 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 594 595 proxy->chip_select = chip->chip_select; 596 proxy->max_speed_hz = chip->max_speed_hz; 597 proxy->mode = chip->mode; 598 proxy->irq = chip->irq; 599 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 600 proxy->dev.platform_data = (void *) chip->platform_data; 601 proxy->controller_data = chip->controller_data; 602 proxy->controller_state = NULL; 603 604 if (chip->properties) { 605 status = device_add_properties(&proxy->dev, chip->properties); 606 if (status) { 607 dev_err(&master->dev, 608 "failed to add properties to '%s': %d\n", 609 chip->modalias, status); 610 goto err_dev_put; 611 } 612 } 613 614 status = spi_add_device(proxy); 615 if (status < 0) 616 goto err_remove_props; 617 618 return proxy; 619 620 err_remove_props: 621 if (chip->properties) 622 device_remove_properties(&proxy->dev); 623 err_dev_put: 624 spi_dev_put(proxy); 625 return NULL; 626 } 627 EXPORT_SYMBOL_GPL(spi_new_device); 628 629 /** 630 * spi_unregister_device - unregister a single SPI device 631 * @spi: spi_device to unregister 632 * 633 * Start making the passed SPI device vanish. Normally this would be handled 634 * by spi_unregister_master(). 635 */ 636 void spi_unregister_device(struct spi_device *spi) 637 { 638 if (!spi) 639 return; 640 641 if (spi->dev.of_node) { 642 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 643 of_node_put(spi->dev.of_node); 644 } 645 if (ACPI_COMPANION(&spi->dev)) 646 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 647 device_unregister(&spi->dev); 648 } 649 EXPORT_SYMBOL_GPL(spi_unregister_device); 650 651 static void spi_match_master_to_boardinfo(struct spi_master *master, 652 struct spi_board_info *bi) 653 { 654 struct spi_device *dev; 655 656 if (master->bus_num != bi->bus_num) 657 return; 658 659 dev = spi_new_device(master, bi); 660 if (!dev) 661 dev_err(master->dev.parent, "can't create new device for %s\n", 662 bi->modalias); 663 } 664 665 /** 666 * spi_register_board_info - register SPI devices for a given board 667 * @info: array of chip descriptors 668 * @n: how many descriptors are provided 669 * Context: can sleep 670 * 671 * Board-specific early init code calls this (probably during arch_initcall) 672 * with segments of the SPI device table. Any device nodes are created later, 673 * after the relevant parent SPI controller (bus_num) is defined. We keep 674 * this table of devices forever, so that reloading a controller driver will 675 * not make Linux forget about these hard-wired devices. 676 * 677 * Other code can also call this, e.g. a particular add-on board might provide 678 * SPI devices through its expansion connector, so code initializing that board 679 * would naturally declare its SPI devices. 680 * 681 * The board info passed can safely be __initdata ... but be careful of 682 * any embedded pointers (platform_data, etc), they're copied as-is. 683 * Device properties are deep-copied though. 684 * 685 * Return: zero on success, else a negative error code. 686 */ 687 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 688 { 689 struct boardinfo *bi; 690 int i; 691 692 if (!n) 693 return 0; 694 695 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 696 if (!bi) 697 return -ENOMEM; 698 699 for (i = 0; i < n; i++, bi++, info++) { 700 struct spi_master *master; 701 702 memcpy(&bi->board_info, info, sizeof(*info)); 703 if (info->properties) { 704 bi->board_info.properties = 705 property_entries_dup(info->properties); 706 if (IS_ERR(bi->board_info.properties)) 707 return PTR_ERR(bi->board_info.properties); 708 } 709 710 mutex_lock(&board_lock); 711 list_add_tail(&bi->list, &board_list); 712 list_for_each_entry(master, &spi_master_list, list) 713 spi_match_master_to_boardinfo(master, &bi->board_info); 714 mutex_unlock(&board_lock); 715 } 716 717 return 0; 718 } 719 720 /*-------------------------------------------------------------------------*/ 721 722 static void spi_set_cs(struct spi_device *spi, bool enable) 723 { 724 if (spi->mode & SPI_CS_HIGH) 725 enable = !enable; 726 727 if (gpio_is_valid(spi->cs_gpio)) { 728 gpio_set_value(spi->cs_gpio, !enable); 729 /* Some SPI masters need both GPIO CS & slave_select */ 730 if ((spi->master->flags & SPI_MASTER_GPIO_SS) && 731 spi->master->set_cs) 732 spi->master->set_cs(spi, !enable); 733 } else if (spi->master->set_cs) { 734 spi->master->set_cs(spi, !enable); 735 } 736 } 737 738 #ifdef CONFIG_HAS_DMA 739 static int spi_map_buf(struct spi_master *master, struct device *dev, 740 struct sg_table *sgt, void *buf, size_t len, 741 enum dma_data_direction dir) 742 { 743 const bool vmalloced_buf = is_vmalloc_addr(buf); 744 unsigned int max_seg_size = dma_get_max_seg_size(dev); 745 #ifdef CONFIG_HIGHMEM 746 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 747 (unsigned long)buf < (PKMAP_BASE + 748 (LAST_PKMAP * PAGE_SIZE))); 749 #else 750 const bool kmap_buf = false; 751 #endif 752 int desc_len; 753 int sgs; 754 struct page *vm_page; 755 struct scatterlist *sg; 756 void *sg_buf; 757 size_t min; 758 int i, ret; 759 760 if (vmalloced_buf || kmap_buf) { 761 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 762 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 763 } else if (virt_addr_valid(buf)) { 764 desc_len = min_t(int, max_seg_size, master->max_dma_len); 765 sgs = DIV_ROUND_UP(len, desc_len); 766 } else { 767 return -EINVAL; 768 } 769 770 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 771 if (ret != 0) 772 return ret; 773 774 sg = &sgt->sgl[0]; 775 for (i = 0; i < sgs; i++) { 776 777 if (vmalloced_buf || kmap_buf) { 778 min = min_t(size_t, 779 len, desc_len - offset_in_page(buf)); 780 if (vmalloced_buf) 781 vm_page = vmalloc_to_page(buf); 782 else 783 vm_page = kmap_to_page(buf); 784 if (!vm_page) { 785 sg_free_table(sgt); 786 return -ENOMEM; 787 } 788 sg_set_page(sg, vm_page, 789 min, offset_in_page(buf)); 790 } else { 791 min = min_t(size_t, len, desc_len); 792 sg_buf = buf; 793 sg_set_buf(sg, sg_buf, min); 794 } 795 796 buf += min; 797 len -= min; 798 sg = sg_next(sg); 799 } 800 801 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 802 if (!ret) 803 ret = -ENOMEM; 804 if (ret < 0) { 805 sg_free_table(sgt); 806 return ret; 807 } 808 809 sgt->nents = ret; 810 811 return 0; 812 } 813 814 static void spi_unmap_buf(struct spi_master *master, struct device *dev, 815 struct sg_table *sgt, enum dma_data_direction dir) 816 { 817 if (sgt->orig_nents) { 818 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 819 sg_free_table(sgt); 820 } 821 } 822 823 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 824 { 825 struct device *tx_dev, *rx_dev; 826 struct spi_transfer *xfer; 827 int ret; 828 829 if (!master->can_dma) 830 return 0; 831 832 if (master->dma_tx) 833 tx_dev = master->dma_tx->device->dev; 834 else 835 tx_dev = master->dev.parent; 836 837 if (master->dma_rx) 838 rx_dev = master->dma_rx->device->dev; 839 else 840 rx_dev = master->dev.parent; 841 842 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 843 if (!master->can_dma(master, msg->spi, xfer)) 844 continue; 845 846 if (xfer->tx_buf != NULL) { 847 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 848 (void *)xfer->tx_buf, xfer->len, 849 DMA_TO_DEVICE); 850 if (ret != 0) 851 return ret; 852 } 853 854 if (xfer->rx_buf != NULL) { 855 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 856 xfer->rx_buf, xfer->len, 857 DMA_FROM_DEVICE); 858 if (ret != 0) { 859 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 860 DMA_TO_DEVICE); 861 return ret; 862 } 863 } 864 } 865 866 master->cur_msg_mapped = true; 867 868 return 0; 869 } 870 871 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 872 { 873 struct spi_transfer *xfer; 874 struct device *tx_dev, *rx_dev; 875 876 if (!master->cur_msg_mapped || !master->can_dma) 877 return 0; 878 879 if (master->dma_tx) 880 tx_dev = master->dma_tx->device->dev; 881 else 882 tx_dev = master->dev.parent; 883 884 if (master->dma_rx) 885 rx_dev = master->dma_rx->device->dev; 886 else 887 rx_dev = master->dev.parent; 888 889 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 890 if (!master->can_dma(master, msg->spi, xfer)) 891 continue; 892 893 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 894 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 895 } 896 897 return 0; 898 } 899 #else /* !CONFIG_HAS_DMA */ 900 static inline int spi_map_buf(struct spi_master *master, 901 struct device *dev, struct sg_table *sgt, 902 void *buf, size_t len, 903 enum dma_data_direction dir) 904 { 905 return -EINVAL; 906 } 907 908 static inline void spi_unmap_buf(struct spi_master *master, 909 struct device *dev, struct sg_table *sgt, 910 enum dma_data_direction dir) 911 { 912 } 913 914 static inline int __spi_map_msg(struct spi_master *master, 915 struct spi_message *msg) 916 { 917 return 0; 918 } 919 920 static inline int __spi_unmap_msg(struct spi_master *master, 921 struct spi_message *msg) 922 { 923 return 0; 924 } 925 #endif /* !CONFIG_HAS_DMA */ 926 927 static inline int spi_unmap_msg(struct spi_master *master, 928 struct spi_message *msg) 929 { 930 struct spi_transfer *xfer; 931 932 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 933 /* 934 * Restore the original value of tx_buf or rx_buf if they are 935 * NULL. 936 */ 937 if (xfer->tx_buf == master->dummy_tx) 938 xfer->tx_buf = NULL; 939 if (xfer->rx_buf == master->dummy_rx) 940 xfer->rx_buf = NULL; 941 } 942 943 return __spi_unmap_msg(master, msg); 944 } 945 946 static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 947 { 948 struct spi_transfer *xfer; 949 void *tmp; 950 unsigned int max_tx, max_rx; 951 952 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 953 max_tx = 0; 954 max_rx = 0; 955 956 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 957 if ((master->flags & SPI_MASTER_MUST_TX) && 958 !xfer->tx_buf) 959 max_tx = max(xfer->len, max_tx); 960 if ((master->flags & SPI_MASTER_MUST_RX) && 961 !xfer->rx_buf) 962 max_rx = max(xfer->len, max_rx); 963 } 964 965 if (max_tx) { 966 tmp = krealloc(master->dummy_tx, max_tx, 967 GFP_KERNEL | GFP_DMA); 968 if (!tmp) 969 return -ENOMEM; 970 master->dummy_tx = tmp; 971 memset(tmp, 0, max_tx); 972 } 973 974 if (max_rx) { 975 tmp = krealloc(master->dummy_rx, max_rx, 976 GFP_KERNEL | GFP_DMA); 977 if (!tmp) 978 return -ENOMEM; 979 master->dummy_rx = tmp; 980 } 981 982 if (max_tx || max_rx) { 983 list_for_each_entry(xfer, &msg->transfers, 984 transfer_list) { 985 if (!xfer->tx_buf) 986 xfer->tx_buf = master->dummy_tx; 987 if (!xfer->rx_buf) 988 xfer->rx_buf = master->dummy_rx; 989 } 990 } 991 } 992 993 return __spi_map_msg(master, msg); 994 } 995 996 /* 997 * spi_transfer_one_message - Default implementation of transfer_one_message() 998 * 999 * This is a standard implementation of transfer_one_message() for 1000 * drivers which implement a transfer_one() operation. It provides 1001 * standard handling of delays and chip select management. 1002 */ 1003 static int spi_transfer_one_message(struct spi_master *master, 1004 struct spi_message *msg) 1005 { 1006 struct spi_transfer *xfer; 1007 bool keep_cs = false; 1008 int ret = 0; 1009 unsigned long long ms = 1; 1010 struct spi_statistics *statm = &master->statistics; 1011 struct spi_statistics *stats = &msg->spi->statistics; 1012 1013 spi_set_cs(msg->spi, true); 1014 1015 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 1016 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 1017 1018 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1019 trace_spi_transfer_start(msg, xfer); 1020 1021 spi_statistics_add_transfer_stats(statm, xfer, master); 1022 spi_statistics_add_transfer_stats(stats, xfer, master); 1023 1024 if (xfer->tx_buf || xfer->rx_buf) { 1025 reinit_completion(&master->xfer_completion); 1026 1027 ret = master->transfer_one(master, msg->spi, xfer); 1028 if (ret < 0) { 1029 SPI_STATISTICS_INCREMENT_FIELD(statm, 1030 errors); 1031 SPI_STATISTICS_INCREMENT_FIELD(stats, 1032 errors); 1033 dev_err(&msg->spi->dev, 1034 "SPI transfer failed: %d\n", ret); 1035 goto out; 1036 } 1037 1038 if (ret > 0) { 1039 ret = 0; 1040 ms = 8LL * 1000LL * xfer->len; 1041 do_div(ms, xfer->speed_hz); 1042 ms += ms + 200; /* some tolerance */ 1043 1044 if (ms > UINT_MAX) 1045 ms = UINT_MAX; 1046 1047 ms = wait_for_completion_timeout(&master->xfer_completion, 1048 msecs_to_jiffies(ms)); 1049 } 1050 1051 if (ms == 0) { 1052 SPI_STATISTICS_INCREMENT_FIELD(statm, 1053 timedout); 1054 SPI_STATISTICS_INCREMENT_FIELD(stats, 1055 timedout); 1056 dev_err(&msg->spi->dev, 1057 "SPI transfer timed out\n"); 1058 msg->status = -ETIMEDOUT; 1059 } 1060 } else { 1061 if (xfer->len) 1062 dev_err(&msg->spi->dev, 1063 "Bufferless transfer has length %u\n", 1064 xfer->len); 1065 } 1066 1067 trace_spi_transfer_stop(msg, xfer); 1068 1069 if (msg->status != -EINPROGRESS) 1070 goto out; 1071 1072 if (xfer->delay_usecs) { 1073 u16 us = xfer->delay_usecs; 1074 1075 if (us <= 10) 1076 udelay(us); 1077 else 1078 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1079 } 1080 1081 if (xfer->cs_change) { 1082 if (list_is_last(&xfer->transfer_list, 1083 &msg->transfers)) { 1084 keep_cs = true; 1085 } else { 1086 spi_set_cs(msg->spi, false); 1087 udelay(10); 1088 spi_set_cs(msg->spi, true); 1089 } 1090 } 1091 1092 msg->actual_length += xfer->len; 1093 } 1094 1095 out: 1096 if (ret != 0 || !keep_cs) 1097 spi_set_cs(msg->spi, false); 1098 1099 if (msg->status == -EINPROGRESS) 1100 msg->status = ret; 1101 1102 if (msg->status && master->handle_err) 1103 master->handle_err(master, msg); 1104 1105 spi_res_release(master, msg); 1106 1107 spi_finalize_current_message(master); 1108 1109 return ret; 1110 } 1111 1112 /** 1113 * spi_finalize_current_transfer - report completion of a transfer 1114 * @master: the master reporting completion 1115 * 1116 * Called by SPI drivers using the core transfer_one_message() 1117 * implementation to notify it that the current interrupt driven 1118 * transfer has finished and the next one may be scheduled. 1119 */ 1120 void spi_finalize_current_transfer(struct spi_master *master) 1121 { 1122 complete(&master->xfer_completion); 1123 } 1124 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1125 1126 /** 1127 * __spi_pump_messages - function which processes spi message queue 1128 * @master: master to process queue for 1129 * @in_kthread: true if we are in the context of the message pump thread 1130 * 1131 * This function checks if there is any spi message in the queue that 1132 * needs processing and if so call out to the driver to initialize hardware 1133 * and transfer each message. 1134 * 1135 * Note that it is called both from the kthread itself and also from 1136 * inside spi_sync(); the queue extraction handling at the top of the 1137 * function should deal with this safely. 1138 */ 1139 static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1140 { 1141 unsigned long flags; 1142 bool was_busy = false; 1143 int ret; 1144 1145 /* Lock queue */ 1146 spin_lock_irqsave(&master->queue_lock, flags); 1147 1148 /* Make sure we are not already running a message */ 1149 if (master->cur_msg) { 1150 spin_unlock_irqrestore(&master->queue_lock, flags); 1151 return; 1152 } 1153 1154 /* If another context is idling the device then defer */ 1155 if (master->idling) { 1156 kthread_queue_work(&master->kworker, &master->pump_messages); 1157 spin_unlock_irqrestore(&master->queue_lock, flags); 1158 return; 1159 } 1160 1161 /* Check if the queue is idle */ 1162 if (list_empty(&master->queue) || !master->running) { 1163 if (!master->busy) { 1164 spin_unlock_irqrestore(&master->queue_lock, flags); 1165 return; 1166 } 1167 1168 /* Only do teardown in the thread */ 1169 if (!in_kthread) { 1170 kthread_queue_work(&master->kworker, 1171 &master->pump_messages); 1172 spin_unlock_irqrestore(&master->queue_lock, flags); 1173 return; 1174 } 1175 1176 master->busy = false; 1177 master->idling = true; 1178 spin_unlock_irqrestore(&master->queue_lock, flags); 1179 1180 kfree(master->dummy_rx); 1181 master->dummy_rx = NULL; 1182 kfree(master->dummy_tx); 1183 master->dummy_tx = NULL; 1184 if (master->unprepare_transfer_hardware && 1185 master->unprepare_transfer_hardware(master)) 1186 dev_err(&master->dev, 1187 "failed to unprepare transfer hardware\n"); 1188 if (master->auto_runtime_pm) { 1189 pm_runtime_mark_last_busy(master->dev.parent); 1190 pm_runtime_put_autosuspend(master->dev.parent); 1191 } 1192 trace_spi_master_idle(master); 1193 1194 spin_lock_irqsave(&master->queue_lock, flags); 1195 master->idling = false; 1196 spin_unlock_irqrestore(&master->queue_lock, flags); 1197 return; 1198 } 1199 1200 /* Extract head of queue */ 1201 master->cur_msg = 1202 list_first_entry(&master->queue, struct spi_message, queue); 1203 1204 list_del_init(&master->cur_msg->queue); 1205 if (master->busy) 1206 was_busy = true; 1207 else 1208 master->busy = true; 1209 spin_unlock_irqrestore(&master->queue_lock, flags); 1210 1211 mutex_lock(&master->io_mutex); 1212 1213 if (!was_busy && master->auto_runtime_pm) { 1214 ret = pm_runtime_get_sync(master->dev.parent); 1215 if (ret < 0) { 1216 dev_err(&master->dev, "Failed to power device: %d\n", 1217 ret); 1218 mutex_unlock(&master->io_mutex); 1219 return; 1220 } 1221 } 1222 1223 if (!was_busy) 1224 trace_spi_master_busy(master); 1225 1226 if (!was_busy && master->prepare_transfer_hardware) { 1227 ret = master->prepare_transfer_hardware(master); 1228 if (ret) { 1229 dev_err(&master->dev, 1230 "failed to prepare transfer hardware\n"); 1231 1232 if (master->auto_runtime_pm) 1233 pm_runtime_put(master->dev.parent); 1234 mutex_unlock(&master->io_mutex); 1235 return; 1236 } 1237 } 1238 1239 trace_spi_message_start(master->cur_msg); 1240 1241 if (master->prepare_message) { 1242 ret = master->prepare_message(master, master->cur_msg); 1243 if (ret) { 1244 dev_err(&master->dev, 1245 "failed to prepare message: %d\n", ret); 1246 master->cur_msg->status = ret; 1247 spi_finalize_current_message(master); 1248 goto out; 1249 } 1250 master->cur_msg_prepared = true; 1251 } 1252 1253 ret = spi_map_msg(master, master->cur_msg); 1254 if (ret) { 1255 master->cur_msg->status = ret; 1256 spi_finalize_current_message(master); 1257 goto out; 1258 } 1259 1260 ret = master->transfer_one_message(master, master->cur_msg); 1261 if (ret) { 1262 dev_err(&master->dev, 1263 "failed to transfer one message from queue\n"); 1264 goto out; 1265 } 1266 1267 out: 1268 mutex_unlock(&master->io_mutex); 1269 1270 /* Prod the scheduler in case transfer_one() was busy waiting */ 1271 if (!ret) 1272 cond_resched(); 1273 } 1274 1275 /** 1276 * spi_pump_messages - kthread work function which processes spi message queue 1277 * @work: pointer to kthread work struct contained in the master struct 1278 */ 1279 static void spi_pump_messages(struct kthread_work *work) 1280 { 1281 struct spi_master *master = 1282 container_of(work, struct spi_master, pump_messages); 1283 1284 __spi_pump_messages(master, true); 1285 } 1286 1287 static int spi_init_queue(struct spi_master *master) 1288 { 1289 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1290 1291 master->running = false; 1292 master->busy = false; 1293 1294 kthread_init_worker(&master->kworker); 1295 master->kworker_task = kthread_run(kthread_worker_fn, 1296 &master->kworker, "%s", 1297 dev_name(&master->dev)); 1298 if (IS_ERR(master->kworker_task)) { 1299 dev_err(&master->dev, "failed to create message pump task\n"); 1300 return PTR_ERR(master->kworker_task); 1301 } 1302 kthread_init_work(&master->pump_messages, spi_pump_messages); 1303 1304 /* 1305 * Master config will indicate if this controller should run the 1306 * message pump with high (realtime) priority to reduce the transfer 1307 * latency on the bus by minimising the delay between a transfer 1308 * request and the scheduling of the message pump thread. Without this 1309 * setting the message pump thread will remain at default priority. 1310 */ 1311 if (master->rt) { 1312 dev_info(&master->dev, 1313 "will run message pump with realtime priority\n"); 1314 sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1315 } 1316 1317 return 0; 1318 } 1319 1320 /** 1321 * spi_get_next_queued_message() - called by driver to check for queued 1322 * messages 1323 * @master: the master to check for queued messages 1324 * 1325 * If there are more messages in the queue, the next message is returned from 1326 * this call. 1327 * 1328 * Return: the next message in the queue, else NULL if the queue is empty. 1329 */ 1330 struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1331 { 1332 struct spi_message *next; 1333 unsigned long flags; 1334 1335 /* get a pointer to the next message, if any */ 1336 spin_lock_irqsave(&master->queue_lock, flags); 1337 next = list_first_entry_or_null(&master->queue, struct spi_message, 1338 queue); 1339 spin_unlock_irqrestore(&master->queue_lock, flags); 1340 1341 return next; 1342 } 1343 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1344 1345 /** 1346 * spi_finalize_current_message() - the current message is complete 1347 * @master: the master to return the message to 1348 * 1349 * Called by the driver to notify the core that the message in the front of the 1350 * queue is complete and can be removed from the queue. 1351 */ 1352 void spi_finalize_current_message(struct spi_master *master) 1353 { 1354 struct spi_message *mesg; 1355 unsigned long flags; 1356 int ret; 1357 1358 spin_lock_irqsave(&master->queue_lock, flags); 1359 mesg = master->cur_msg; 1360 spin_unlock_irqrestore(&master->queue_lock, flags); 1361 1362 spi_unmap_msg(master, mesg); 1363 1364 if (master->cur_msg_prepared && master->unprepare_message) { 1365 ret = master->unprepare_message(master, mesg); 1366 if (ret) { 1367 dev_err(&master->dev, 1368 "failed to unprepare message: %d\n", ret); 1369 } 1370 } 1371 1372 spin_lock_irqsave(&master->queue_lock, flags); 1373 master->cur_msg = NULL; 1374 master->cur_msg_prepared = false; 1375 kthread_queue_work(&master->kworker, &master->pump_messages); 1376 spin_unlock_irqrestore(&master->queue_lock, flags); 1377 1378 trace_spi_message_done(mesg); 1379 1380 mesg->state = NULL; 1381 if (mesg->complete) 1382 mesg->complete(mesg->context); 1383 } 1384 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1385 1386 static int spi_start_queue(struct spi_master *master) 1387 { 1388 unsigned long flags; 1389 1390 spin_lock_irqsave(&master->queue_lock, flags); 1391 1392 if (master->running || master->busy) { 1393 spin_unlock_irqrestore(&master->queue_lock, flags); 1394 return -EBUSY; 1395 } 1396 1397 master->running = true; 1398 master->cur_msg = NULL; 1399 spin_unlock_irqrestore(&master->queue_lock, flags); 1400 1401 kthread_queue_work(&master->kworker, &master->pump_messages); 1402 1403 return 0; 1404 } 1405 1406 static int spi_stop_queue(struct spi_master *master) 1407 { 1408 unsigned long flags; 1409 unsigned limit = 500; 1410 int ret = 0; 1411 1412 spin_lock_irqsave(&master->queue_lock, flags); 1413 1414 /* 1415 * This is a bit lame, but is optimized for the common execution path. 1416 * A wait_queue on the master->busy could be used, but then the common 1417 * execution path (pump_messages) would be required to call wake_up or 1418 * friends on every SPI message. Do this instead. 1419 */ 1420 while ((!list_empty(&master->queue) || master->busy) && limit--) { 1421 spin_unlock_irqrestore(&master->queue_lock, flags); 1422 usleep_range(10000, 11000); 1423 spin_lock_irqsave(&master->queue_lock, flags); 1424 } 1425 1426 if (!list_empty(&master->queue) || master->busy) 1427 ret = -EBUSY; 1428 else 1429 master->running = false; 1430 1431 spin_unlock_irqrestore(&master->queue_lock, flags); 1432 1433 if (ret) { 1434 dev_warn(&master->dev, 1435 "could not stop message queue\n"); 1436 return ret; 1437 } 1438 return ret; 1439 } 1440 1441 static int spi_destroy_queue(struct spi_master *master) 1442 { 1443 int ret; 1444 1445 ret = spi_stop_queue(master); 1446 1447 /* 1448 * kthread_flush_worker will block until all work is done. 1449 * If the reason that stop_queue timed out is that the work will never 1450 * finish, then it does no good to call flush/stop thread, so 1451 * return anyway. 1452 */ 1453 if (ret) { 1454 dev_err(&master->dev, "problem destroying queue\n"); 1455 return ret; 1456 } 1457 1458 kthread_flush_worker(&master->kworker); 1459 kthread_stop(master->kworker_task); 1460 1461 return 0; 1462 } 1463 1464 static int __spi_queued_transfer(struct spi_device *spi, 1465 struct spi_message *msg, 1466 bool need_pump) 1467 { 1468 struct spi_master *master = spi->master; 1469 unsigned long flags; 1470 1471 spin_lock_irqsave(&master->queue_lock, flags); 1472 1473 if (!master->running) { 1474 spin_unlock_irqrestore(&master->queue_lock, flags); 1475 return -ESHUTDOWN; 1476 } 1477 msg->actual_length = 0; 1478 msg->status = -EINPROGRESS; 1479 1480 list_add_tail(&msg->queue, &master->queue); 1481 if (!master->busy && need_pump) 1482 kthread_queue_work(&master->kworker, &master->pump_messages); 1483 1484 spin_unlock_irqrestore(&master->queue_lock, flags); 1485 return 0; 1486 } 1487 1488 /** 1489 * spi_queued_transfer - transfer function for queued transfers 1490 * @spi: spi device which is requesting transfer 1491 * @msg: spi message which is to handled is queued to driver queue 1492 * 1493 * Return: zero on success, else a negative error code. 1494 */ 1495 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 1496 { 1497 return __spi_queued_transfer(spi, msg, true); 1498 } 1499 1500 static int spi_master_initialize_queue(struct spi_master *master) 1501 { 1502 int ret; 1503 1504 master->transfer = spi_queued_transfer; 1505 if (!master->transfer_one_message) 1506 master->transfer_one_message = spi_transfer_one_message; 1507 1508 /* Initialize and start queue */ 1509 ret = spi_init_queue(master); 1510 if (ret) { 1511 dev_err(&master->dev, "problem initializing queue\n"); 1512 goto err_init_queue; 1513 } 1514 master->queued = true; 1515 ret = spi_start_queue(master); 1516 if (ret) { 1517 dev_err(&master->dev, "problem starting queue\n"); 1518 goto err_start_queue; 1519 } 1520 1521 return 0; 1522 1523 err_start_queue: 1524 spi_destroy_queue(master); 1525 err_init_queue: 1526 return ret; 1527 } 1528 1529 /*-------------------------------------------------------------------------*/ 1530 1531 #if defined(CONFIG_OF) 1532 static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, 1533 struct device_node *nc) 1534 { 1535 u32 value; 1536 int rc; 1537 1538 /* Device address */ 1539 rc = of_property_read_u32(nc, "reg", &value); 1540 if (rc) { 1541 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 1542 nc->full_name, rc); 1543 return rc; 1544 } 1545 spi->chip_select = value; 1546 1547 /* Mode (clock phase/polarity/etc.) */ 1548 if (of_find_property(nc, "spi-cpha", NULL)) 1549 spi->mode |= SPI_CPHA; 1550 if (of_find_property(nc, "spi-cpol", NULL)) 1551 spi->mode |= SPI_CPOL; 1552 if (of_find_property(nc, "spi-cs-high", NULL)) 1553 spi->mode |= SPI_CS_HIGH; 1554 if (of_find_property(nc, "spi-3wire", NULL)) 1555 spi->mode |= SPI_3WIRE; 1556 if (of_find_property(nc, "spi-lsb-first", NULL)) 1557 spi->mode |= SPI_LSB_FIRST; 1558 1559 /* Device DUAL/QUAD mode */ 1560 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1561 switch (value) { 1562 case 1: 1563 break; 1564 case 2: 1565 spi->mode |= SPI_TX_DUAL; 1566 break; 1567 case 4: 1568 spi->mode |= SPI_TX_QUAD; 1569 break; 1570 default: 1571 dev_warn(&master->dev, 1572 "spi-tx-bus-width %d not supported\n", 1573 value); 1574 break; 1575 } 1576 } 1577 1578 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1579 switch (value) { 1580 case 1: 1581 break; 1582 case 2: 1583 spi->mode |= SPI_RX_DUAL; 1584 break; 1585 case 4: 1586 spi->mode |= SPI_RX_QUAD; 1587 break; 1588 default: 1589 dev_warn(&master->dev, 1590 "spi-rx-bus-width %d not supported\n", 1591 value); 1592 break; 1593 } 1594 } 1595 1596 /* Device speed */ 1597 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1598 if (rc) { 1599 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 1600 nc->full_name, rc); 1601 return rc; 1602 } 1603 spi->max_speed_hz = value; 1604 1605 return 0; 1606 } 1607 1608 static struct spi_device * 1609 of_register_spi_device(struct spi_master *master, struct device_node *nc) 1610 { 1611 struct spi_device *spi; 1612 int rc; 1613 1614 /* Alloc an spi_device */ 1615 spi = spi_alloc_device(master); 1616 if (!spi) { 1617 dev_err(&master->dev, "spi_device alloc error for %s\n", 1618 nc->full_name); 1619 rc = -ENOMEM; 1620 goto err_out; 1621 } 1622 1623 /* Select device driver */ 1624 rc = of_modalias_node(nc, spi->modalias, 1625 sizeof(spi->modalias)); 1626 if (rc < 0) { 1627 dev_err(&master->dev, "cannot find modalias for %s\n", 1628 nc->full_name); 1629 goto err_out; 1630 } 1631 1632 rc = of_spi_parse_dt(master, spi, nc); 1633 if (rc) 1634 goto err_out; 1635 1636 /* Store a pointer to the node in the device structure */ 1637 of_node_get(nc); 1638 spi->dev.of_node = nc; 1639 1640 /* Register the new device */ 1641 rc = spi_add_device(spi); 1642 if (rc) { 1643 dev_err(&master->dev, "spi_device register error %s\n", 1644 nc->full_name); 1645 goto err_of_node_put; 1646 } 1647 1648 return spi; 1649 1650 err_of_node_put: 1651 of_node_put(nc); 1652 err_out: 1653 spi_dev_put(spi); 1654 return ERR_PTR(rc); 1655 } 1656 1657 /** 1658 * of_register_spi_devices() - Register child devices onto the SPI bus 1659 * @master: Pointer to spi_master device 1660 * 1661 * Registers an spi_device for each child node of master node which has a 'reg' 1662 * property. 1663 */ 1664 static void of_register_spi_devices(struct spi_master *master) 1665 { 1666 struct spi_device *spi; 1667 struct device_node *nc; 1668 1669 if (!master->dev.of_node) 1670 return; 1671 1672 for_each_available_child_of_node(master->dev.of_node, nc) { 1673 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1674 continue; 1675 spi = of_register_spi_device(master, nc); 1676 if (IS_ERR(spi)) { 1677 dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1678 nc->full_name); 1679 of_node_clear_flag(nc, OF_POPULATED); 1680 } 1681 } 1682 } 1683 #else 1684 static void of_register_spi_devices(struct spi_master *master) { } 1685 #endif 1686 1687 #ifdef CONFIG_ACPI 1688 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1689 { 1690 struct spi_device *spi = data; 1691 struct spi_master *master = spi->master; 1692 1693 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1694 struct acpi_resource_spi_serialbus *sb; 1695 1696 sb = &ares->data.spi_serial_bus; 1697 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1698 /* 1699 * ACPI DeviceSelection numbering is handled by the 1700 * host controller driver in Windows and can vary 1701 * from driver to driver. In Linux we always expect 1702 * 0 .. max - 1 so we need to ask the driver to 1703 * translate between the two schemes. 1704 */ 1705 if (master->fw_translate_cs) { 1706 int cs = master->fw_translate_cs(master, 1707 sb->device_selection); 1708 if (cs < 0) 1709 return cs; 1710 spi->chip_select = cs; 1711 } else { 1712 spi->chip_select = sb->device_selection; 1713 } 1714 1715 spi->max_speed_hz = sb->connection_speed; 1716 1717 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1718 spi->mode |= SPI_CPHA; 1719 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1720 spi->mode |= SPI_CPOL; 1721 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1722 spi->mode |= SPI_CS_HIGH; 1723 } 1724 } else if (spi->irq < 0) { 1725 struct resource r; 1726 1727 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1728 spi->irq = r.start; 1729 } 1730 1731 /* Always tell the ACPI core to skip this resource */ 1732 return 1; 1733 } 1734 1735 static acpi_status acpi_register_spi_device(struct spi_master *master, 1736 struct acpi_device *adev) 1737 { 1738 struct list_head resource_list; 1739 struct spi_device *spi; 1740 int ret; 1741 1742 if (acpi_bus_get_status(adev) || !adev->status.present || 1743 acpi_device_enumerated(adev)) 1744 return AE_OK; 1745 1746 spi = spi_alloc_device(master); 1747 if (!spi) { 1748 dev_err(&master->dev, "failed to allocate SPI device for %s\n", 1749 dev_name(&adev->dev)); 1750 return AE_NO_MEMORY; 1751 } 1752 1753 ACPI_COMPANION_SET(&spi->dev, adev); 1754 spi->irq = -1; 1755 1756 INIT_LIST_HEAD(&resource_list); 1757 ret = acpi_dev_get_resources(adev, &resource_list, 1758 acpi_spi_add_resource, spi); 1759 acpi_dev_free_resource_list(&resource_list); 1760 1761 if (ret < 0 || !spi->max_speed_hz) { 1762 spi_dev_put(spi); 1763 return AE_OK; 1764 } 1765 1766 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 1767 sizeof(spi->modalias)); 1768 1769 if (spi->irq < 0) 1770 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 1771 1772 acpi_device_set_enumerated(adev); 1773 1774 adev->power.flags.ignore_parent = true; 1775 if (spi_add_device(spi)) { 1776 adev->power.flags.ignore_parent = false; 1777 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1778 dev_name(&adev->dev)); 1779 spi_dev_put(spi); 1780 } 1781 1782 return AE_OK; 1783 } 1784 1785 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1786 void *data, void **return_value) 1787 { 1788 struct spi_master *master = data; 1789 struct acpi_device *adev; 1790 1791 if (acpi_bus_get_device(handle, &adev)) 1792 return AE_OK; 1793 1794 return acpi_register_spi_device(master, adev); 1795 } 1796 1797 static void acpi_register_spi_devices(struct spi_master *master) 1798 { 1799 acpi_status status; 1800 acpi_handle handle; 1801 1802 handle = ACPI_HANDLE(master->dev.parent); 1803 if (!handle) 1804 return; 1805 1806 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1807 acpi_spi_add_device, NULL, 1808 master, NULL); 1809 if (ACPI_FAILURE(status)) 1810 dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 1811 } 1812 #else 1813 static inline void acpi_register_spi_devices(struct spi_master *master) {} 1814 #endif /* CONFIG_ACPI */ 1815 1816 static void spi_master_release(struct device *dev) 1817 { 1818 struct spi_master *master; 1819 1820 master = container_of(dev, struct spi_master, dev); 1821 kfree(master); 1822 } 1823 1824 static struct class spi_master_class = { 1825 .name = "spi_master", 1826 .owner = THIS_MODULE, 1827 .dev_release = spi_master_release, 1828 .dev_groups = spi_master_groups, 1829 }; 1830 1831 1832 /** 1833 * spi_alloc_master - allocate SPI master controller 1834 * @dev: the controller, possibly using the platform_bus 1835 * @size: how much zeroed driver-private data to allocate; the pointer to this 1836 * memory is in the driver_data field of the returned device, 1837 * accessible with spi_master_get_devdata(). 1838 * Context: can sleep 1839 * 1840 * This call is used only by SPI master controller drivers, which are the 1841 * only ones directly touching chip registers. It's how they allocate 1842 * an spi_master structure, prior to calling spi_register_master(). 1843 * 1844 * This must be called from context that can sleep. 1845 * 1846 * The caller is responsible for assigning the bus number and initializing 1847 * the master's methods before calling spi_register_master(); and (after errors 1848 * adding the device) calling spi_master_put() to prevent a memory leak. 1849 * 1850 * Return: the SPI master structure on success, else NULL. 1851 */ 1852 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1853 { 1854 struct spi_master *master; 1855 1856 if (!dev) 1857 return NULL; 1858 1859 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1860 if (!master) 1861 return NULL; 1862 1863 device_initialize(&master->dev); 1864 master->bus_num = -1; 1865 master->num_chipselect = 1; 1866 master->dev.class = &spi_master_class; 1867 master->dev.parent = dev; 1868 pm_suspend_ignore_children(&master->dev, true); 1869 spi_master_set_devdata(master, &master[1]); 1870 1871 return master; 1872 } 1873 EXPORT_SYMBOL_GPL(spi_alloc_master); 1874 1875 #ifdef CONFIG_OF 1876 static int of_spi_register_master(struct spi_master *master) 1877 { 1878 int nb, i, *cs; 1879 struct device_node *np = master->dev.of_node; 1880 1881 if (!np) 1882 return 0; 1883 1884 nb = of_gpio_named_count(np, "cs-gpios"); 1885 master->num_chipselect = max_t(int, nb, master->num_chipselect); 1886 1887 /* Return error only for an incorrectly formed cs-gpios property */ 1888 if (nb == 0 || nb == -ENOENT) 1889 return 0; 1890 else if (nb < 0) 1891 return nb; 1892 1893 cs = devm_kzalloc(&master->dev, 1894 sizeof(int) * master->num_chipselect, 1895 GFP_KERNEL); 1896 master->cs_gpios = cs; 1897 1898 if (!master->cs_gpios) 1899 return -ENOMEM; 1900 1901 for (i = 0; i < master->num_chipselect; i++) 1902 cs[i] = -ENOENT; 1903 1904 for (i = 0; i < nb; i++) 1905 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1906 1907 return 0; 1908 } 1909 #else 1910 static int of_spi_register_master(struct spi_master *master) 1911 { 1912 return 0; 1913 } 1914 #endif 1915 1916 /** 1917 * spi_register_master - register SPI master controller 1918 * @master: initialized master, originally from spi_alloc_master() 1919 * Context: can sleep 1920 * 1921 * SPI master controllers connect to their drivers using some non-SPI bus, 1922 * such as the platform bus. The final stage of probe() in that code 1923 * includes calling spi_register_master() to hook up to this SPI bus glue. 1924 * 1925 * SPI controllers use board specific (often SOC specific) bus numbers, 1926 * and board-specific addressing for SPI devices combines those numbers 1927 * with chip select numbers. Since SPI does not directly support dynamic 1928 * device identification, boards need configuration tables telling which 1929 * chip is at which address. 1930 * 1931 * This must be called from context that can sleep. It returns zero on 1932 * success, else a negative error code (dropping the master's refcount). 1933 * After a successful return, the caller is responsible for calling 1934 * spi_unregister_master(). 1935 * 1936 * Return: zero on success, else a negative error code. 1937 */ 1938 int spi_register_master(struct spi_master *master) 1939 { 1940 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 1941 struct device *dev = master->dev.parent; 1942 struct boardinfo *bi; 1943 int status = -ENODEV; 1944 int dynamic = 0; 1945 1946 if (!dev) 1947 return -ENODEV; 1948 1949 status = of_spi_register_master(master); 1950 if (status) 1951 return status; 1952 1953 /* even if it's just one always-selected device, there must 1954 * be at least one chipselect 1955 */ 1956 if (master->num_chipselect == 0) 1957 return -EINVAL; 1958 1959 if ((master->bus_num < 0) && master->dev.of_node) 1960 master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1961 1962 /* convention: dynamically assigned bus IDs count down from the max */ 1963 if (master->bus_num < 0) { 1964 /* FIXME switch to an IDR based scheme, something like 1965 * I2C now uses, so we can't run out of "dynamic" IDs 1966 */ 1967 master->bus_num = atomic_dec_return(&dyn_bus_id); 1968 dynamic = 1; 1969 } 1970 1971 INIT_LIST_HEAD(&master->queue); 1972 spin_lock_init(&master->queue_lock); 1973 spin_lock_init(&master->bus_lock_spinlock); 1974 mutex_init(&master->bus_lock_mutex); 1975 mutex_init(&master->io_mutex); 1976 master->bus_lock_flag = 0; 1977 init_completion(&master->xfer_completion); 1978 if (!master->max_dma_len) 1979 master->max_dma_len = INT_MAX; 1980 1981 /* register the device, then userspace will see it. 1982 * registration fails if the bus ID is in use. 1983 */ 1984 dev_set_name(&master->dev, "spi%u", master->bus_num); 1985 status = device_add(&master->dev); 1986 if (status < 0) 1987 goto done; 1988 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 1989 dynamic ? " (dynamic)" : ""); 1990 1991 /* If we're using a queued driver, start the queue */ 1992 if (master->transfer) 1993 dev_info(dev, "master is unqueued, this is deprecated\n"); 1994 else { 1995 status = spi_master_initialize_queue(master); 1996 if (status) { 1997 device_del(&master->dev); 1998 goto done; 1999 } 2000 } 2001 /* add statistics */ 2002 spin_lock_init(&master->statistics.lock); 2003 2004 mutex_lock(&board_lock); 2005 list_add_tail(&master->list, &spi_master_list); 2006 list_for_each_entry(bi, &board_list, list) 2007 spi_match_master_to_boardinfo(master, &bi->board_info); 2008 mutex_unlock(&board_lock); 2009 2010 /* Register devices from the device tree and ACPI */ 2011 of_register_spi_devices(master); 2012 acpi_register_spi_devices(master); 2013 done: 2014 return status; 2015 } 2016 EXPORT_SYMBOL_GPL(spi_register_master); 2017 2018 static void devm_spi_unregister(struct device *dev, void *res) 2019 { 2020 spi_unregister_master(*(struct spi_master **)res); 2021 } 2022 2023 /** 2024 * dev_spi_register_master - register managed SPI master controller 2025 * @dev: device managing SPI master 2026 * @master: initialized master, originally from spi_alloc_master() 2027 * Context: can sleep 2028 * 2029 * Register a SPI device as with spi_register_master() which will 2030 * automatically be unregister 2031 * 2032 * Return: zero on success, else a negative error code. 2033 */ 2034 int devm_spi_register_master(struct device *dev, struct spi_master *master) 2035 { 2036 struct spi_master **ptr; 2037 int ret; 2038 2039 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2040 if (!ptr) 2041 return -ENOMEM; 2042 2043 ret = spi_register_master(master); 2044 if (!ret) { 2045 *ptr = master; 2046 devres_add(dev, ptr); 2047 } else { 2048 devres_free(ptr); 2049 } 2050 2051 return ret; 2052 } 2053 EXPORT_SYMBOL_GPL(devm_spi_register_master); 2054 2055 static int __unregister(struct device *dev, void *null) 2056 { 2057 spi_unregister_device(to_spi_device(dev)); 2058 return 0; 2059 } 2060 2061 /** 2062 * spi_unregister_master - unregister SPI master controller 2063 * @master: the master being unregistered 2064 * Context: can sleep 2065 * 2066 * This call is used only by SPI master controller drivers, which are the 2067 * only ones directly touching chip registers. 2068 * 2069 * This must be called from context that can sleep. 2070 */ 2071 void spi_unregister_master(struct spi_master *master) 2072 { 2073 int dummy; 2074 2075 if (master->queued) { 2076 if (spi_destroy_queue(master)) 2077 dev_err(&master->dev, "queue remove failed\n"); 2078 } 2079 2080 mutex_lock(&board_lock); 2081 list_del(&master->list); 2082 mutex_unlock(&board_lock); 2083 2084 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2085 device_unregister(&master->dev); 2086 } 2087 EXPORT_SYMBOL_GPL(spi_unregister_master); 2088 2089 int spi_master_suspend(struct spi_master *master) 2090 { 2091 int ret; 2092 2093 /* Basically no-ops for non-queued masters */ 2094 if (!master->queued) 2095 return 0; 2096 2097 ret = spi_stop_queue(master); 2098 if (ret) 2099 dev_err(&master->dev, "queue stop failed\n"); 2100 2101 return ret; 2102 } 2103 EXPORT_SYMBOL_GPL(spi_master_suspend); 2104 2105 int spi_master_resume(struct spi_master *master) 2106 { 2107 int ret; 2108 2109 if (!master->queued) 2110 return 0; 2111 2112 ret = spi_start_queue(master); 2113 if (ret) 2114 dev_err(&master->dev, "queue restart failed\n"); 2115 2116 return ret; 2117 } 2118 EXPORT_SYMBOL_GPL(spi_master_resume); 2119 2120 static int __spi_master_match(struct device *dev, const void *data) 2121 { 2122 struct spi_master *m; 2123 const u16 *bus_num = data; 2124 2125 m = container_of(dev, struct spi_master, dev); 2126 return m->bus_num == *bus_num; 2127 } 2128 2129 /** 2130 * spi_busnum_to_master - look up master associated with bus_num 2131 * @bus_num: the master's bus number 2132 * Context: can sleep 2133 * 2134 * This call may be used with devices that are registered after 2135 * arch init time. It returns a refcounted pointer to the relevant 2136 * spi_master (which the caller must release), or NULL if there is 2137 * no such master registered. 2138 * 2139 * Return: the SPI master structure on success, else NULL. 2140 */ 2141 struct spi_master *spi_busnum_to_master(u16 bus_num) 2142 { 2143 struct device *dev; 2144 struct spi_master *master = NULL; 2145 2146 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2147 __spi_master_match); 2148 if (dev) 2149 master = container_of(dev, struct spi_master, dev); 2150 /* reference got in class_find_device */ 2151 return master; 2152 } 2153 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2154 2155 /*-------------------------------------------------------------------------*/ 2156 2157 /* Core methods for SPI resource management */ 2158 2159 /** 2160 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2161 * during the processing of a spi_message while using 2162 * spi_transfer_one 2163 * @spi: the spi device for which we allocate memory 2164 * @release: the release code to execute for this resource 2165 * @size: size to alloc and return 2166 * @gfp: GFP allocation flags 2167 * 2168 * Return: the pointer to the allocated data 2169 * 2170 * This may get enhanced in the future to allocate from a memory pool 2171 * of the @spi_device or @spi_master to avoid repeated allocations. 2172 */ 2173 void *spi_res_alloc(struct spi_device *spi, 2174 spi_res_release_t release, 2175 size_t size, gfp_t gfp) 2176 { 2177 struct spi_res *sres; 2178 2179 sres = kzalloc(sizeof(*sres) + size, gfp); 2180 if (!sres) 2181 return NULL; 2182 2183 INIT_LIST_HEAD(&sres->entry); 2184 sres->release = release; 2185 2186 return sres->data; 2187 } 2188 EXPORT_SYMBOL_GPL(spi_res_alloc); 2189 2190 /** 2191 * spi_res_free - free an spi resource 2192 * @res: pointer to the custom data of a resource 2193 * 2194 */ 2195 void spi_res_free(void *res) 2196 { 2197 struct spi_res *sres = container_of(res, struct spi_res, data); 2198 2199 if (!res) 2200 return; 2201 2202 WARN_ON(!list_empty(&sres->entry)); 2203 kfree(sres); 2204 } 2205 EXPORT_SYMBOL_GPL(spi_res_free); 2206 2207 /** 2208 * spi_res_add - add a spi_res to the spi_message 2209 * @message: the spi message 2210 * @res: the spi_resource 2211 */ 2212 void spi_res_add(struct spi_message *message, void *res) 2213 { 2214 struct spi_res *sres = container_of(res, struct spi_res, data); 2215 2216 WARN_ON(!list_empty(&sres->entry)); 2217 list_add_tail(&sres->entry, &message->resources); 2218 } 2219 EXPORT_SYMBOL_GPL(spi_res_add); 2220 2221 /** 2222 * spi_res_release - release all spi resources for this message 2223 * @master: the @spi_master 2224 * @message: the @spi_message 2225 */ 2226 void spi_res_release(struct spi_master *master, 2227 struct spi_message *message) 2228 { 2229 struct spi_res *res; 2230 2231 while (!list_empty(&message->resources)) { 2232 res = list_last_entry(&message->resources, 2233 struct spi_res, entry); 2234 2235 if (res->release) 2236 res->release(master, message, res->data); 2237 2238 list_del(&res->entry); 2239 2240 kfree(res); 2241 } 2242 } 2243 EXPORT_SYMBOL_GPL(spi_res_release); 2244 2245 /*-------------------------------------------------------------------------*/ 2246 2247 /* Core methods for spi_message alterations */ 2248 2249 static void __spi_replace_transfers_release(struct spi_master *master, 2250 struct spi_message *msg, 2251 void *res) 2252 { 2253 struct spi_replaced_transfers *rxfer = res; 2254 size_t i; 2255 2256 /* call extra callback if requested */ 2257 if (rxfer->release) 2258 rxfer->release(master, msg, res); 2259 2260 /* insert replaced transfers back into the message */ 2261 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2262 2263 /* remove the formerly inserted entries */ 2264 for (i = 0; i < rxfer->inserted; i++) 2265 list_del(&rxfer->inserted_transfers[i].transfer_list); 2266 } 2267 2268 /** 2269 * spi_replace_transfers - replace transfers with several transfers 2270 * and register change with spi_message.resources 2271 * @msg: the spi_message we work upon 2272 * @xfer_first: the first spi_transfer we want to replace 2273 * @remove: number of transfers to remove 2274 * @insert: the number of transfers we want to insert instead 2275 * @release: extra release code necessary in some circumstances 2276 * @extradatasize: extra data to allocate (with alignment guarantees 2277 * of struct @spi_transfer) 2278 * @gfp: gfp flags 2279 * 2280 * Returns: pointer to @spi_replaced_transfers, 2281 * PTR_ERR(...) in case of errors. 2282 */ 2283 struct spi_replaced_transfers *spi_replace_transfers( 2284 struct spi_message *msg, 2285 struct spi_transfer *xfer_first, 2286 size_t remove, 2287 size_t insert, 2288 spi_replaced_release_t release, 2289 size_t extradatasize, 2290 gfp_t gfp) 2291 { 2292 struct spi_replaced_transfers *rxfer; 2293 struct spi_transfer *xfer; 2294 size_t i; 2295 2296 /* allocate the structure using spi_res */ 2297 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2298 insert * sizeof(struct spi_transfer) 2299 + sizeof(struct spi_replaced_transfers) 2300 + extradatasize, 2301 gfp); 2302 if (!rxfer) 2303 return ERR_PTR(-ENOMEM); 2304 2305 /* the release code to invoke before running the generic release */ 2306 rxfer->release = release; 2307 2308 /* assign extradata */ 2309 if (extradatasize) 2310 rxfer->extradata = 2311 &rxfer->inserted_transfers[insert]; 2312 2313 /* init the replaced_transfers list */ 2314 INIT_LIST_HEAD(&rxfer->replaced_transfers); 2315 2316 /* assign the list_entry after which we should reinsert 2317 * the @replaced_transfers - it may be spi_message.messages! 2318 */ 2319 rxfer->replaced_after = xfer_first->transfer_list.prev; 2320 2321 /* remove the requested number of transfers */ 2322 for (i = 0; i < remove; i++) { 2323 /* if the entry after replaced_after it is msg->transfers 2324 * then we have been requested to remove more transfers 2325 * than are in the list 2326 */ 2327 if (rxfer->replaced_after->next == &msg->transfers) { 2328 dev_err(&msg->spi->dev, 2329 "requested to remove more spi_transfers than are available\n"); 2330 /* insert replaced transfers back into the message */ 2331 list_splice(&rxfer->replaced_transfers, 2332 rxfer->replaced_after); 2333 2334 /* free the spi_replace_transfer structure */ 2335 spi_res_free(rxfer); 2336 2337 /* and return with an error */ 2338 return ERR_PTR(-EINVAL); 2339 } 2340 2341 /* remove the entry after replaced_after from list of 2342 * transfers and add it to list of replaced_transfers 2343 */ 2344 list_move_tail(rxfer->replaced_after->next, 2345 &rxfer->replaced_transfers); 2346 } 2347 2348 /* create copy of the given xfer with identical settings 2349 * based on the first transfer to get removed 2350 */ 2351 for (i = 0; i < insert; i++) { 2352 /* we need to run in reverse order */ 2353 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2354 2355 /* copy all spi_transfer data */ 2356 memcpy(xfer, xfer_first, sizeof(*xfer)); 2357 2358 /* add to list */ 2359 list_add(&xfer->transfer_list, rxfer->replaced_after); 2360 2361 /* clear cs_change and delay_usecs for all but the last */ 2362 if (i) { 2363 xfer->cs_change = false; 2364 xfer->delay_usecs = 0; 2365 } 2366 } 2367 2368 /* set up inserted */ 2369 rxfer->inserted = insert; 2370 2371 /* and register it with spi_res/spi_message */ 2372 spi_res_add(msg, rxfer); 2373 2374 return rxfer; 2375 } 2376 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2377 2378 static int __spi_split_transfer_maxsize(struct spi_master *master, 2379 struct spi_message *msg, 2380 struct spi_transfer **xferp, 2381 size_t maxsize, 2382 gfp_t gfp) 2383 { 2384 struct spi_transfer *xfer = *xferp, *xfers; 2385 struct spi_replaced_transfers *srt; 2386 size_t offset; 2387 size_t count, i; 2388 2389 /* warn once about this fact that we are splitting a transfer */ 2390 dev_warn_once(&msg->spi->dev, 2391 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2392 xfer->len, maxsize); 2393 2394 /* calculate how many we have to replace */ 2395 count = DIV_ROUND_UP(xfer->len, maxsize); 2396 2397 /* create replacement */ 2398 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2399 if (IS_ERR(srt)) 2400 return PTR_ERR(srt); 2401 xfers = srt->inserted_transfers; 2402 2403 /* now handle each of those newly inserted spi_transfers 2404 * note that the replacements spi_transfers all are preset 2405 * to the same values as *xferp, so tx_buf, rx_buf and len 2406 * are all identical (as well as most others) 2407 * so we just have to fix up len and the pointers. 2408 * 2409 * this also includes support for the depreciated 2410 * spi_message.is_dma_mapped interface 2411 */ 2412 2413 /* the first transfer just needs the length modified, so we 2414 * run it outside the loop 2415 */ 2416 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2417 2418 /* all the others need rx_buf/tx_buf also set */ 2419 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2420 /* update rx_buf, tx_buf and dma */ 2421 if (xfers[i].rx_buf) 2422 xfers[i].rx_buf += offset; 2423 if (xfers[i].rx_dma) 2424 xfers[i].rx_dma += offset; 2425 if (xfers[i].tx_buf) 2426 xfers[i].tx_buf += offset; 2427 if (xfers[i].tx_dma) 2428 xfers[i].tx_dma += offset; 2429 2430 /* update length */ 2431 xfers[i].len = min(maxsize, xfers[i].len - offset); 2432 } 2433 2434 /* we set up xferp to the last entry we have inserted, 2435 * so that we skip those already split transfers 2436 */ 2437 *xferp = &xfers[count - 1]; 2438 2439 /* increment statistics counters */ 2440 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2441 transfers_split_maxsize); 2442 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2443 transfers_split_maxsize); 2444 2445 return 0; 2446 } 2447 2448 /** 2449 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2450 * when an individual transfer exceeds a 2451 * certain size 2452 * @master: the @spi_master for this transfer 2453 * @msg: the @spi_message to transform 2454 * @maxsize: the maximum when to apply this 2455 * @gfp: GFP allocation flags 2456 * 2457 * Return: status of transformation 2458 */ 2459 int spi_split_transfers_maxsize(struct spi_master *master, 2460 struct spi_message *msg, 2461 size_t maxsize, 2462 gfp_t gfp) 2463 { 2464 struct spi_transfer *xfer; 2465 int ret; 2466 2467 /* iterate over the transfer_list, 2468 * but note that xfer is advanced to the last transfer inserted 2469 * to avoid checking sizes again unnecessarily (also xfer does 2470 * potentiall belong to a different list by the time the 2471 * replacement has happened 2472 */ 2473 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2474 if (xfer->len > maxsize) { 2475 ret = __spi_split_transfer_maxsize( 2476 master, msg, &xfer, maxsize, gfp); 2477 if (ret) 2478 return ret; 2479 } 2480 } 2481 2482 return 0; 2483 } 2484 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2485 2486 /*-------------------------------------------------------------------------*/ 2487 2488 /* Core methods for SPI master protocol drivers. Some of the 2489 * other core methods are currently defined as inline functions. 2490 */ 2491 2492 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2493 { 2494 if (master->bits_per_word_mask) { 2495 /* Only 32 bits fit in the mask */ 2496 if (bits_per_word > 32) 2497 return -EINVAL; 2498 if (!(master->bits_per_word_mask & 2499 SPI_BPW_MASK(bits_per_word))) 2500 return -EINVAL; 2501 } 2502 2503 return 0; 2504 } 2505 2506 /** 2507 * spi_setup - setup SPI mode and clock rate 2508 * @spi: the device whose settings are being modified 2509 * Context: can sleep, and no requests are queued to the device 2510 * 2511 * SPI protocol drivers may need to update the transfer mode if the 2512 * device doesn't work with its default. They may likewise need 2513 * to update clock rates or word sizes from initial values. This function 2514 * changes those settings, and must be called from a context that can sleep. 2515 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2516 * effect the next time the device is selected and data is transferred to 2517 * or from it. When this function returns, the spi device is deselected. 2518 * 2519 * Note that this call will fail if the protocol driver specifies an option 2520 * that the underlying controller or its driver does not support. For 2521 * example, not all hardware supports wire transfers using nine bit words, 2522 * LSB-first wire encoding, or active-high chipselects. 2523 * 2524 * Return: zero on success, else a negative error code. 2525 */ 2526 int spi_setup(struct spi_device *spi) 2527 { 2528 unsigned bad_bits, ugly_bits; 2529 int status; 2530 2531 /* check mode to prevent that DUAL and QUAD set at the same time 2532 */ 2533 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2534 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2535 dev_err(&spi->dev, 2536 "setup: can not select dual and quad at the same time\n"); 2537 return -EINVAL; 2538 } 2539 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2540 */ 2541 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2542 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2543 return -EINVAL; 2544 /* help drivers fail *cleanly* when they need options 2545 * that aren't supported with their current master 2546 */ 2547 bad_bits = spi->mode & ~spi->master->mode_bits; 2548 ugly_bits = bad_bits & 2549 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2550 if (ugly_bits) { 2551 dev_warn(&spi->dev, 2552 "setup: ignoring unsupported mode bits %x\n", 2553 ugly_bits); 2554 spi->mode &= ~ugly_bits; 2555 bad_bits &= ~ugly_bits; 2556 } 2557 if (bad_bits) { 2558 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2559 bad_bits); 2560 return -EINVAL; 2561 } 2562 2563 if (!spi->bits_per_word) 2564 spi->bits_per_word = 8; 2565 2566 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2567 if (status) 2568 return status; 2569 2570 if (!spi->max_speed_hz) 2571 spi->max_speed_hz = spi->master->max_speed_hz; 2572 2573 if (spi->master->setup) 2574 status = spi->master->setup(spi); 2575 2576 spi_set_cs(spi, false); 2577 2578 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2579 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2580 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2581 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2582 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2583 (spi->mode & SPI_LOOP) ? "loopback, " : "", 2584 spi->bits_per_word, spi->max_speed_hz, 2585 status); 2586 2587 return status; 2588 } 2589 EXPORT_SYMBOL_GPL(spi_setup); 2590 2591 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2592 { 2593 struct spi_master *master = spi->master; 2594 struct spi_transfer *xfer; 2595 int w_size; 2596 2597 if (list_empty(&message->transfers)) 2598 return -EINVAL; 2599 2600 /* Half-duplex links include original MicroWire, and ones with 2601 * only one data pin like SPI_3WIRE (switches direction) or where 2602 * either MOSI or MISO is missing. They can also be caused by 2603 * software limitations. 2604 */ 2605 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2606 || (spi->mode & SPI_3WIRE)) { 2607 unsigned flags = master->flags; 2608 2609 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2610 if (xfer->rx_buf && xfer->tx_buf) 2611 return -EINVAL; 2612 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2613 return -EINVAL; 2614 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2615 return -EINVAL; 2616 } 2617 } 2618 2619 /** 2620 * Set transfer bits_per_word and max speed as spi device default if 2621 * it is not set for this transfer. 2622 * Set transfer tx_nbits and rx_nbits as single transfer default 2623 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2624 */ 2625 message->frame_length = 0; 2626 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2627 message->frame_length += xfer->len; 2628 if (!xfer->bits_per_word) 2629 xfer->bits_per_word = spi->bits_per_word; 2630 2631 if (!xfer->speed_hz) 2632 xfer->speed_hz = spi->max_speed_hz; 2633 if (!xfer->speed_hz) 2634 xfer->speed_hz = master->max_speed_hz; 2635 2636 if (master->max_speed_hz && 2637 xfer->speed_hz > master->max_speed_hz) 2638 xfer->speed_hz = master->max_speed_hz; 2639 2640 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2641 return -EINVAL; 2642 2643 /* 2644 * SPI transfer length should be multiple of SPI word size 2645 * where SPI word size should be power-of-two multiple 2646 */ 2647 if (xfer->bits_per_word <= 8) 2648 w_size = 1; 2649 else if (xfer->bits_per_word <= 16) 2650 w_size = 2; 2651 else 2652 w_size = 4; 2653 2654 /* No partial transfers accepted */ 2655 if (xfer->len % w_size) 2656 return -EINVAL; 2657 2658 if (xfer->speed_hz && master->min_speed_hz && 2659 xfer->speed_hz < master->min_speed_hz) 2660 return -EINVAL; 2661 2662 if (xfer->tx_buf && !xfer->tx_nbits) 2663 xfer->tx_nbits = SPI_NBITS_SINGLE; 2664 if (xfer->rx_buf && !xfer->rx_nbits) 2665 xfer->rx_nbits = SPI_NBITS_SINGLE; 2666 /* check transfer tx/rx_nbits: 2667 * 1. check the value matches one of single, dual and quad 2668 * 2. check tx/rx_nbits match the mode in spi_device 2669 */ 2670 if (xfer->tx_buf) { 2671 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2672 xfer->tx_nbits != SPI_NBITS_DUAL && 2673 xfer->tx_nbits != SPI_NBITS_QUAD) 2674 return -EINVAL; 2675 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2676 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2677 return -EINVAL; 2678 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2679 !(spi->mode & SPI_TX_QUAD)) 2680 return -EINVAL; 2681 } 2682 /* check transfer rx_nbits */ 2683 if (xfer->rx_buf) { 2684 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2685 xfer->rx_nbits != SPI_NBITS_DUAL && 2686 xfer->rx_nbits != SPI_NBITS_QUAD) 2687 return -EINVAL; 2688 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2689 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2690 return -EINVAL; 2691 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2692 !(spi->mode & SPI_RX_QUAD)) 2693 return -EINVAL; 2694 } 2695 } 2696 2697 message->status = -EINPROGRESS; 2698 2699 return 0; 2700 } 2701 2702 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2703 { 2704 struct spi_master *master = spi->master; 2705 2706 message->spi = spi; 2707 2708 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2709 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2710 2711 trace_spi_message_submit(message); 2712 2713 return master->transfer(spi, message); 2714 } 2715 2716 /** 2717 * spi_async - asynchronous SPI transfer 2718 * @spi: device with which data will be exchanged 2719 * @message: describes the data transfers, including completion callback 2720 * Context: any (irqs may be blocked, etc) 2721 * 2722 * This call may be used in_irq and other contexts which can't sleep, 2723 * as well as from task contexts which can sleep. 2724 * 2725 * The completion callback is invoked in a context which can't sleep. 2726 * Before that invocation, the value of message->status is undefined. 2727 * When the callback is issued, message->status holds either zero (to 2728 * indicate complete success) or a negative error code. After that 2729 * callback returns, the driver which issued the transfer request may 2730 * deallocate the associated memory; it's no longer in use by any SPI 2731 * core or controller driver code. 2732 * 2733 * Note that although all messages to a spi_device are handled in 2734 * FIFO order, messages may go to different devices in other orders. 2735 * Some device might be higher priority, or have various "hard" access 2736 * time requirements, for example. 2737 * 2738 * On detection of any fault during the transfer, processing of 2739 * the entire message is aborted, and the device is deselected. 2740 * Until returning from the associated message completion callback, 2741 * no other spi_message queued to that device will be processed. 2742 * (This rule applies equally to all the synchronous transfer calls, 2743 * which are wrappers around this core asynchronous primitive.) 2744 * 2745 * Return: zero on success, else a negative error code. 2746 */ 2747 int spi_async(struct spi_device *spi, struct spi_message *message) 2748 { 2749 struct spi_master *master = spi->master; 2750 int ret; 2751 unsigned long flags; 2752 2753 ret = __spi_validate(spi, message); 2754 if (ret != 0) 2755 return ret; 2756 2757 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2758 2759 if (master->bus_lock_flag) 2760 ret = -EBUSY; 2761 else 2762 ret = __spi_async(spi, message); 2763 2764 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2765 2766 return ret; 2767 } 2768 EXPORT_SYMBOL_GPL(spi_async); 2769 2770 /** 2771 * spi_async_locked - version of spi_async with exclusive bus usage 2772 * @spi: device with which data will be exchanged 2773 * @message: describes the data transfers, including completion callback 2774 * Context: any (irqs may be blocked, etc) 2775 * 2776 * This call may be used in_irq and other contexts which can't sleep, 2777 * as well as from task contexts which can sleep. 2778 * 2779 * The completion callback is invoked in a context which can't sleep. 2780 * Before that invocation, the value of message->status is undefined. 2781 * When the callback is issued, message->status holds either zero (to 2782 * indicate complete success) or a negative error code. After that 2783 * callback returns, the driver which issued the transfer request may 2784 * deallocate the associated memory; it's no longer in use by any SPI 2785 * core or controller driver code. 2786 * 2787 * Note that although all messages to a spi_device are handled in 2788 * FIFO order, messages may go to different devices in other orders. 2789 * Some device might be higher priority, or have various "hard" access 2790 * time requirements, for example. 2791 * 2792 * On detection of any fault during the transfer, processing of 2793 * the entire message is aborted, and the device is deselected. 2794 * Until returning from the associated message completion callback, 2795 * no other spi_message queued to that device will be processed. 2796 * (This rule applies equally to all the synchronous transfer calls, 2797 * which are wrappers around this core asynchronous primitive.) 2798 * 2799 * Return: zero on success, else a negative error code. 2800 */ 2801 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2802 { 2803 struct spi_master *master = spi->master; 2804 int ret; 2805 unsigned long flags; 2806 2807 ret = __spi_validate(spi, message); 2808 if (ret != 0) 2809 return ret; 2810 2811 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2812 2813 ret = __spi_async(spi, message); 2814 2815 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2816 2817 return ret; 2818 2819 } 2820 EXPORT_SYMBOL_GPL(spi_async_locked); 2821 2822 2823 int spi_flash_read(struct spi_device *spi, 2824 struct spi_flash_read_message *msg) 2825 2826 { 2827 struct spi_master *master = spi->master; 2828 struct device *rx_dev = NULL; 2829 int ret; 2830 2831 if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2832 msg->addr_nbits == SPI_NBITS_DUAL) && 2833 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2834 return -EINVAL; 2835 if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2836 msg->addr_nbits == SPI_NBITS_QUAD) && 2837 !(spi->mode & SPI_TX_QUAD)) 2838 return -EINVAL; 2839 if (msg->data_nbits == SPI_NBITS_DUAL && 2840 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2841 return -EINVAL; 2842 if (msg->data_nbits == SPI_NBITS_QUAD && 2843 !(spi->mode & SPI_RX_QUAD)) 2844 return -EINVAL; 2845 2846 if (master->auto_runtime_pm) { 2847 ret = pm_runtime_get_sync(master->dev.parent); 2848 if (ret < 0) { 2849 dev_err(&master->dev, "Failed to power device: %d\n", 2850 ret); 2851 return ret; 2852 } 2853 } 2854 2855 mutex_lock(&master->bus_lock_mutex); 2856 mutex_lock(&master->io_mutex); 2857 if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) { 2858 rx_dev = master->dma_rx->device->dev; 2859 ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2860 msg->buf, msg->len, 2861 DMA_FROM_DEVICE); 2862 if (!ret) 2863 msg->cur_msg_mapped = true; 2864 } 2865 ret = master->spi_flash_read(spi, msg); 2866 if (msg->cur_msg_mapped) 2867 spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2868 DMA_FROM_DEVICE); 2869 mutex_unlock(&master->io_mutex); 2870 mutex_unlock(&master->bus_lock_mutex); 2871 2872 if (master->auto_runtime_pm) 2873 pm_runtime_put(master->dev.parent); 2874 2875 return ret; 2876 } 2877 EXPORT_SYMBOL_GPL(spi_flash_read); 2878 2879 /*-------------------------------------------------------------------------*/ 2880 2881 /* Utility methods for SPI master protocol drivers, layered on 2882 * top of the core. Some other utility methods are defined as 2883 * inline functions. 2884 */ 2885 2886 static void spi_complete(void *arg) 2887 { 2888 complete(arg); 2889 } 2890 2891 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2892 { 2893 DECLARE_COMPLETION_ONSTACK(done); 2894 int status; 2895 struct spi_master *master = spi->master; 2896 unsigned long flags; 2897 2898 status = __spi_validate(spi, message); 2899 if (status != 0) 2900 return status; 2901 2902 message->complete = spi_complete; 2903 message->context = &done; 2904 message->spi = spi; 2905 2906 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2907 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2908 2909 /* If we're not using the legacy transfer method then we will 2910 * try to transfer in the calling context so special case. 2911 * This code would be less tricky if we could remove the 2912 * support for driver implemented message queues. 2913 */ 2914 if (master->transfer == spi_queued_transfer) { 2915 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2916 2917 trace_spi_message_submit(message); 2918 2919 status = __spi_queued_transfer(spi, message, false); 2920 2921 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2922 } else { 2923 status = spi_async_locked(spi, message); 2924 } 2925 2926 if (status == 0) { 2927 /* Push out the messages in the calling context if we 2928 * can. 2929 */ 2930 if (master->transfer == spi_queued_transfer) { 2931 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2932 spi_sync_immediate); 2933 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2934 spi_sync_immediate); 2935 __spi_pump_messages(master, false); 2936 } 2937 2938 wait_for_completion(&done); 2939 status = message->status; 2940 } 2941 message->context = NULL; 2942 return status; 2943 } 2944 2945 /** 2946 * spi_sync - blocking/synchronous SPI data transfers 2947 * @spi: device with which data will be exchanged 2948 * @message: describes the data transfers 2949 * Context: can sleep 2950 * 2951 * This call may only be used from a context that may sleep. The sleep 2952 * is non-interruptible, and has no timeout. Low-overhead controller 2953 * drivers may DMA directly into and out of the message buffers. 2954 * 2955 * Note that the SPI device's chip select is active during the message, 2956 * and then is normally disabled between messages. Drivers for some 2957 * frequently-used devices may want to minimize costs of selecting a chip, 2958 * by leaving it selected in anticipation that the next message will go 2959 * to the same chip. (That may increase power usage.) 2960 * 2961 * Also, the caller is guaranteeing that the memory associated with the 2962 * message will not be freed before this call returns. 2963 * 2964 * Return: zero on success, else a negative error code. 2965 */ 2966 int spi_sync(struct spi_device *spi, struct spi_message *message) 2967 { 2968 int ret; 2969 2970 mutex_lock(&spi->master->bus_lock_mutex); 2971 ret = __spi_sync(spi, message); 2972 mutex_unlock(&spi->master->bus_lock_mutex); 2973 2974 return ret; 2975 } 2976 EXPORT_SYMBOL_GPL(spi_sync); 2977 2978 /** 2979 * spi_sync_locked - version of spi_sync with exclusive bus usage 2980 * @spi: device with which data will be exchanged 2981 * @message: describes the data transfers 2982 * Context: can sleep 2983 * 2984 * This call may only be used from a context that may sleep. The sleep 2985 * is non-interruptible, and has no timeout. Low-overhead controller 2986 * drivers may DMA directly into and out of the message buffers. 2987 * 2988 * This call should be used by drivers that require exclusive access to the 2989 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2990 * be released by a spi_bus_unlock call when the exclusive access is over. 2991 * 2992 * Return: zero on success, else a negative error code. 2993 */ 2994 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2995 { 2996 return __spi_sync(spi, message); 2997 } 2998 EXPORT_SYMBOL_GPL(spi_sync_locked); 2999 3000 /** 3001 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3002 * @master: SPI bus master that should be locked for exclusive bus access 3003 * Context: can sleep 3004 * 3005 * This call may only be used from a context that may sleep. The sleep 3006 * is non-interruptible, and has no timeout. 3007 * 3008 * This call should be used by drivers that require exclusive access to the 3009 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 3010 * exclusive access is over. Data transfer must be done by spi_sync_locked 3011 * and spi_async_locked calls when the SPI bus lock is held. 3012 * 3013 * Return: always zero. 3014 */ 3015 int spi_bus_lock(struct spi_master *master) 3016 { 3017 unsigned long flags; 3018 3019 mutex_lock(&master->bus_lock_mutex); 3020 3021 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 3022 master->bus_lock_flag = 1; 3023 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3024 3025 /* mutex remains locked until spi_bus_unlock is called */ 3026 3027 return 0; 3028 } 3029 EXPORT_SYMBOL_GPL(spi_bus_lock); 3030 3031 /** 3032 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3033 * @master: SPI bus master that was locked for exclusive bus access 3034 * Context: can sleep 3035 * 3036 * This call may only be used from a context that may sleep. The sleep 3037 * is non-interruptible, and has no timeout. 3038 * 3039 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3040 * call. 3041 * 3042 * Return: always zero. 3043 */ 3044 int spi_bus_unlock(struct spi_master *master) 3045 { 3046 master->bus_lock_flag = 0; 3047 3048 mutex_unlock(&master->bus_lock_mutex); 3049 3050 return 0; 3051 } 3052 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3053 3054 /* portable code must never pass more than 32 bytes */ 3055 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3056 3057 static u8 *buf; 3058 3059 /** 3060 * spi_write_then_read - SPI synchronous write followed by read 3061 * @spi: device with which data will be exchanged 3062 * @txbuf: data to be written (need not be dma-safe) 3063 * @n_tx: size of txbuf, in bytes 3064 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3065 * @n_rx: size of rxbuf, in bytes 3066 * Context: can sleep 3067 * 3068 * This performs a half duplex MicroWire style transaction with the 3069 * device, sending txbuf and then reading rxbuf. The return value 3070 * is zero for success, else a negative errno status code. 3071 * This call may only be used from a context that may sleep. 3072 * 3073 * Parameters to this routine are always copied using a small buffer; 3074 * portable code should never use this for more than 32 bytes. 3075 * Performance-sensitive or bulk transfer code should instead use 3076 * spi_{async,sync}() calls with dma-safe buffers. 3077 * 3078 * Return: zero on success, else a negative error code. 3079 */ 3080 int spi_write_then_read(struct spi_device *spi, 3081 const void *txbuf, unsigned n_tx, 3082 void *rxbuf, unsigned n_rx) 3083 { 3084 static DEFINE_MUTEX(lock); 3085 3086 int status; 3087 struct spi_message message; 3088 struct spi_transfer x[2]; 3089 u8 *local_buf; 3090 3091 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3092 * copying here, (as a pure convenience thing), but we can 3093 * keep heap costs out of the hot path unless someone else is 3094 * using the pre-allocated buffer or the transfer is too large. 3095 */ 3096 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3097 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3098 GFP_KERNEL | GFP_DMA); 3099 if (!local_buf) 3100 return -ENOMEM; 3101 } else { 3102 local_buf = buf; 3103 } 3104 3105 spi_message_init(&message); 3106 memset(x, 0, sizeof(x)); 3107 if (n_tx) { 3108 x[0].len = n_tx; 3109 spi_message_add_tail(&x[0], &message); 3110 } 3111 if (n_rx) { 3112 x[1].len = n_rx; 3113 spi_message_add_tail(&x[1], &message); 3114 } 3115 3116 memcpy(local_buf, txbuf, n_tx); 3117 x[0].tx_buf = local_buf; 3118 x[1].rx_buf = local_buf + n_tx; 3119 3120 /* do the i/o */ 3121 status = spi_sync(spi, &message); 3122 if (status == 0) 3123 memcpy(rxbuf, x[1].rx_buf, n_rx); 3124 3125 if (x[0].tx_buf == buf) 3126 mutex_unlock(&lock); 3127 else 3128 kfree(local_buf); 3129 3130 return status; 3131 } 3132 EXPORT_SYMBOL_GPL(spi_write_then_read); 3133 3134 /*-------------------------------------------------------------------------*/ 3135 3136 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3137 static int __spi_of_device_match(struct device *dev, void *data) 3138 { 3139 return dev->of_node == data; 3140 } 3141 3142 /* must call put_device() when done with returned spi_device device */ 3143 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3144 { 3145 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3146 __spi_of_device_match); 3147 return dev ? to_spi_device(dev) : NULL; 3148 } 3149 3150 static int __spi_of_master_match(struct device *dev, const void *data) 3151 { 3152 return dev->of_node == data; 3153 } 3154 3155 /* the spi masters are not using spi_bus, so we find it with another way */ 3156 static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3157 { 3158 struct device *dev; 3159 3160 dev = class_find_device(&spi_master_class, NULL, node, 3161 __spi_of_master_match); 3162 if (!dev) 3163 return NULL; 3164 3165 /* reference got in class_find_device */ 3166 return container_of(dev, struct spi_master, dev); 3167 } 3168 3169 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3170 void *arg) 3171 { 3172 struct of_reconfig_data *rd = arg; 3173 struct spi_master *master; 3174 struct spi_device *spi; 3175 3176 switch (of_reconfig_get_state_change(action, arg)) { 3177 case OF_RECONFIG_CHANGE_ADD: 3178 master = of_find_spi_master_by_node(rd->dn->parent); 3179 if (master == NULL) 3180 return NOTIFY_OK; /* not for us */ 3181 3182 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3183 put_device(&master->dev); 3184 return NOTIFY_OK; 3185 } 3186 3187 spi = of_register_spi_device(master, rd->dn); 3188 put_device(&master->dev); 3189 3190 if (IS_ERR(spi)) { 3191 pr_err("%s: failed to create for '%s'\n", 3192 __func__, rd->dn->full_name); 3193 of_node_clear_flag(rd->dn, OF_POPULATED); 3194 return notifier_from_errno(PTR_ERR(spi)); 3195 } 3196 break; 3197 3198 case OF_RECONFIG_CHANGE_REMOVE: 3199 /* already depopulated? */ 3200 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3201 return NOTIFY_OK; 3202 3203 /* find our device by node */ 3204 spi = of_find_spi_device_by_node(rd->dn); 3205 if (spi == NULL) 3206 return NOTIFY_OK; /* no? not meant for us */ 3207 3208 /* unregister takes one ref away */ 3209 spi_unregister_device(spi); 3210 3211 /* and put the reference of the find */ 3212 put_device(&spi->dev); 3213 break; 3214 } 3215 3216 return NOTIFY_OK; 3217 } 3218 3219 static struct notifier_block spi_of_notifier = { 3220 .notifier_call = of_spi_notify, 3221 }; 3222 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3223 extern struct notifier_block spi_of_notifier; 3224 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3225 3226 #if IS_ENABLED(CONFIG_ACPI) 3227 static int spi_acpi_master_match(struct device *dev, const void *data) 3228 { 3229 return ACPI_COMPANION(dev->parent) == data; 3230 } 3231 3232 static int spi_acpi_device_match(struct device *dev, void *data) 3233 { 3234 return ACPI_COMPANION(dev) == data; 3235 } 3236 3237 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3238 { 3239 struct device *dev; 3240 3241 dev = class_find_device(&spi_master_class, NULL, adev, 3242 spi_acpi_master_match); 3243 if (!dev) 3244 return NULL; 3245 3246 return container_of(dev, struct spi_master, dev); 3247 } 3248 3249 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3250 { 3251 struct device *dev; 3252 3253 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3254 3255 return dev ? to_spi_device(dev) : NULL; 3256 } 3257 3258 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3259 void *arg) 3260 { 3261 struct acpi_device *adev = arg; 3262 struct spi_master *master; 3263 struct spi_device *spi; 3264 3265 switch (value) { 3266 case ACPI_RECONFIG_DEVICE_ADD: 3267 master = acpi_spi_find_master_by_adev(adev->parent); 3268 if (!master) 3269 break; 3270 3271 acpi_register_spi_device(master, adev); 3272 put_device(&master->dev); 3273 break; 3274 case ACPI_RECONFIG_DEVICE_REMOVE: 3275 if (!acpi_device_enumerated(adev)) 3276 break; 3277 3278 spi = acpi_spi_find_device_by_adev(adev); 3279 if (!spi) 3280 break; 3281 3282 spi_unregister_device(spi); 3283 put_device(&spi->dev); 3284 break; 3285 } 3286 3287 return NOTIFY_OK; 3288 } 3289 3290 static struct notifier_block spi_acpi_notifier = { 3291 .notifier_call = acpi_spi_notify, 3292 }; 3293 #else 3294 extern struct notifier_block spi_acpi_notifier; 3295 #endif 3296 3297 static int __init spi_init(void) 3298 { 3299 int status; 3300 3301 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3302 if (!buf) { 3303 status = -ENOMEM; 3304 goto err0; 3305 } 3306 3307 status = bus_register(&spi_bus_type); 3308 if (status < 0) 3309 goto err1; 3310 3311 status = class_register(&spi_master_class); 3312 if (status < 0) 3313 goto err2; 3314 3315 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3316 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3317 if (IS_ENABLED(CONFIG_ACPI)) 3318 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3319 3320 return 0; 3321 3322 err2: 3323 bus_unregister(&spi_bus_type); 3324 err1: 3325 kfree(buf); 3326 buf = NULL; 3327 err0: 3328 return status; 3329 } 3330 3331 /* board_info is normally registered in arch_initcall(), 3332 * but even essential drivers wait till later 3333 * 3334 * REVISIT only boardinfo really needs static linking. the rest (device and 3335 * driver registration) _could_ be dynamically linked (modular) ... costs 3336 * include needing to have boardinfo data structures be much more public. 3337 */ 3338 postcore_initcall(spi_init); 3339 3340