// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

: ""); 93 device_unlock(dev); 94 return len; 95 } 96 static DEVICE_ATTR_RW(driver_override); 97 98 static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev) 99 { 100 struct spi_statistics __percpu *pcpu_stats; 101 102 if (dev) 103 pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); 104 else 105 pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); 106 107 if (pcpu_stats) { 108 int cpu; 109 110 for_each_possible_cpu(cpu) { 111 struct spi_statistics *stat; 112 113 stat = per_cpu_ptr(pcpu_stats, cpu); 114 u64_stats_init(&stat->syncp); 115 } 116 } 117 return pcpu_stats; 118 } 119 120 #define spi_pcpu_stats_totalize(ret, in, field) \ 121 do { \ 122 int i; \ 123 ret = 0; \ 124 for_each_possible_cpu(i) { \ 125 const struct spi_statistics *pcpu_stats; \ 126 u64 inc; \ 127 unsigned int start; \ 128 pcpu_stats = per_cpu_ptr(in, i); \ 129 do { \ 130 start = u64_stats_fetch_begin_irq( \ 131 &pcpu_stats->syncp); \ 132 inc = u64_stats_read(&pcpu_stats->field); \ 133 } while (u64_stats_fetch_retry_irq( \ 134 &pcpu_stats->syncp, start)); \ 135 ret += inc; \ 136 } \ 137 } while (0) 138 139 #define SPI_STATISTICS_ATTRS(field, file) \ 140 static ssize_t spi_controller_##field##_show(struct device *dev, \ 141 struct device_attribute *attr, \ 142 char *buf) \ 143 { \ 144 struct spi_controller *ctlr = container_of(dev, \ 145 struct spi_controller, dev); \ 146 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ 147 } \ 148 static struct device_attribute dev_attr_spi_controller_##field = { \ 149 .attr = { .name = file, .mode = 0444 }, \ 150 .show = spi_controller_##field##_show, \ 151 }; \ 152 static ssize_t spi_device_##field##_show(struct device *dev, \ 153 struct device_attribute *attr, \ 154 char *buf) \ 155 { \ 156 struct spi_device *spi = to_spi_device(dev); \ 157 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ 158 } \ 159 static struct device_attribute dev_attr_spi_device_##field = { \ 160 .attr = { .name = file, .mode = 0444 }, \ 161 .show = spi_device_##field##_show, \ 162 } 163 164 #define SPI_STATISTICS_SHOW_NAME(name, file, field) \ 165 static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \ 166 char *buf) \ 167 { \ 168 ssize_t len; \ 169 u64 val; \ 170 spi_pcpu_stats_totalize(val, stat, field); \ 171 len = sysfs_emit(buf, "%llu\n", val); \ 172 return len; \ 173 } \ 174 SPI_STATISTICS_ATTRS(name, file) 175 176 #define SPI_STATISTICS_SHOW(field) \ 177 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 178 field) 179 180 SPI_STATISTICS_SHOW(messages); 181 SPI_STATISTICS_SHOW(transfers); 182 SPI_STATISTICS_SHOW(errors); 183 SPI_STATISTICS_SHOW(timedout); 184 185 SPI_STATISTICS_SHOW(spi_sync); 186 SPI_STATISTICS_SHOW(spi_sync_immediate); 187 SPI_STATISTICS_SHOW(spi_async); 188 189 SPI_STATISTICS_SHOW(bytes); 190 SPI_STATISTICS_SHOW(bytes_rx); 191 SPI_STATISTICS_SHOW(bytes_tx); 192 193 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 194 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 195 "transfer_bytes_histo_" number, \ 196 transfer_bytes_histo[index]) 197 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 205 

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
&dev_attr_spi_controller_transfer_bytes_histo8.attr, 290 &dev_attr_spi_controller_transfer_bytes_histo9.attr, 291 &dev_attr_spi_controller_transfer_bytes_histo10.attr, 292 &dev_attr_spi_controller_transfer_bytes_histo11.attr, 293 &dev_attr_spi_controller_transfer_bytes_histo12.attr, 294 &dev_attr_spi_controller_transfer_bytes_histo13.attr, 295 &dev_attr_spi_controller_transfer_bytes_histo14.attr, 296 &dev_attr_spi_controller_transfer_bytes_histo15.attr, 297 &dev_attr_spi_controller_transfer_bytes_histo16.attr, 298 &dev_attr_spi_controller_transfers_split_maxsize.attr, 299 NULL, 300 }; 301 302 static const struct attribute_group spi_controller_statistics_group = { 303 .name = "statistics", 304 .attrs = spi_controller_statistics_attrs, 305 }; 306 307 static const struct attribute_group *spi_master_groups[] = { 308 &spi_controller_statistics_group, 309 NULL, 310 }; 311 312 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats, 313 struct spi_transfer *xfer, 314 struct spi_controller *ctlr) 315 { 316 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 317 struct spi_statistics *stats; 318 319 if (l2len < 0) 320 l2len = 0; 321 322 get_cpu(); 323 stats = this_cpu_ptr(pcpu_stats); 324 u64_stats_update_begin(&stats->syncp); 325 326 u64_stats_inc(&stats->transfers); 327 u64_stats_inc(&stats->transfer_bytes_histo[l2len]); 328 329 u64_stats_add(&stats->bytes, xfer->len); 330 if ((xfer->tx_buf) && 331 (xfer->tx_buf != ctlr->dummy_tx)) 332 u64_stats_add(&stats->bytes_tx, xfer->len); 333 if ((xfer->rx_buf) && 334 (xfer->rx_buf != ctlr->dummy_rx)) 335 u64_stats_add(&stats->bytes_rx, xfer->len); 336 337 u64_stats_update_end(&stats->syncp); 338 put_cpu(); 339 } 340 341 /* 342 * modalias support makes "modprobe $MODALIAS" new-style hotplug work, 343 * and the sysfs version makes coldplug work too. 

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
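
/*
 * Illustrative sketch, not part of this file, for a hypothetical "acme" chip:
 * a driver normally provides both an spi_device_id table and an OF match
 * table, so the warning above is never triggered and module autoloading works
 * for both DT and legacy instantiation.
 *
 *	static const struct spi_device_id acme_spi_ids[] = {
 *		{ "acme-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_spi_ids);
 *
 *	static const struct of_device_id acme_of_match[] = {
 *		{ .compatible = "acme,acme-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, acme_of_match);
 *
 *	static struct spi_driver acme_driver = {
 *		.driver = {
 *			.name		= "acme-chip",
 *			.of_match_table	= acme_of_match,
 *		},
 *		.id_table	= acme_spi_ids,
 *		.probe		= acme_probe,	// hypothetical callbacks
 *		.remove		= acme_remove,
 *	};
 *	module_spi_driver(acme_driver);
 */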

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
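
/*
 * Illustrative sketch, not part of this file: spi_new_device() above is
 * roughly what a bus-extender or adapter driver would do by hand with the
 * spi_alloc_device()/spi_add_device() pair, e.g. for a hypothetical chip
 * discovered out-of-band:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 10000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "acme-chip", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);	// discard on failure
 */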

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
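
/*
 * Illustrative sketch, not part of this file: board init code typically
 * registers a table like the following from an arch_initcall, here for a
 * hypothetical sensor wired to bus 1, chip select 0:
 *
 *	static struct spi_board_info acme_board_info[] __initdata = {
 *		{
 *			.modalias	= "acme-sensor",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(acme_board_info, ARRAY_SIZE(acme_board_info));
 */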

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of the GPIO polarity and
			 * thus the SPISerialBus() resource defines it on the per-chip
			 * basis. In order to avoid a chain of negations, the GPIO
			 * polarity is considered being Active High. Even for the cases
			 * when _DSD() is involved (in the updated versions of ACPI)
			 * the GPIO CS polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
			 * into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
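
/*
 * Illustrative sketch, not part of this file: a controller driver opts in to
 * the mapping done by __spi_map_msg() by providing DMA channels and a
 * ->can_dma() callback; a minimal, hypothetical policy might be:
 *
 *	static bool acme_spi_can_dma(struct spi_controller *ctlr,
 *				     struct spi_device *spi,
 *				     struct spi_transfer *xfer)
 *	{
 *		// Only bother with DMA for transfers larger than the FIFO.
 *		return xfer->len > 32;
 *	}
 *
 *	ctlr->can_dma = acme_spi_can_dma;
 */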

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}
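
/*
 * Illustrative note, not part of the driver: with a delay of
 * { .value = 2, .unit = SPI_DELAY_UNIT_SCK } and an effective clock of
 * 1 MHz, spi_delay_to_ns() returns 2 * DIV_ROUND_UP(1000000000, 1000000)
 * = 2000 ns, which _spi_transfer_delay_ns() then executes as a 2 us delay.
 */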

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
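
/*
 * Illustrative sketch, not part of this file, for a hypothetical controller
 * driver: a ->transfer_one() implementation that starts the transfer in
 * hardware and returns 1 makes spi_transfer_one_message() wait on
 * xfer_completion, which the driver's completion interrupt then releases via
 * spi_finalize_current_transfer():
 *
 *	static int acme_spi_transfer_one(struct spi_controller *ctlr,
 *					 struct spi_device *spi,
 *					 struct spi_transfer *xfer)
 *	{
 *		acme_spi_start_xfer(ctlr, xfer);	// hypothetical helper
 *		return 1;	// still in flight; core waits for completion
 *	}
 *
 *	static irqreturn_t acme_spi_irq(int irq, void *data)
 *	{
 *		spi_finalize_current_transfer(data);
 *		return IRQ_HANDLED;
 *	}
 */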

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
		struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * Drivers implementation of transfer_one_message() must arrange for
	 * spi_finalize_current_message() to get called. Most drivers will do
	 * this in the calling context, but some don't. For those cases, a
	 * completion is used to guarantee that this function does not return
	 * until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * Use of the following two flags enable to opportunistically skip the
	 * use of the completion since its use involves expensive spin locks.
	 * In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the IO mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
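
/*
 * Illustrative sketch, not part of this file: a PIO controller driver
 * brackets its word loop with the two helpers above, e.g.:
 *
 *	for (i = 0; i < xfer->len; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		acme_spi_write_word(ctlr, tx[i]);	// hypothetical helper
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 *
 * Only the calls that straddle ptp_sts_word_pre/ptp_sts_word_post actually
 * latch a timestamp; the rest return early.
 */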

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_messages callback the SPI bus has the opportunity
	 * to split a transfer to smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
1951 */ 1952 spi_res_release(ctlr, mesg); 1953 1954 if (mesg->prepared && ctlr->unprepare_message) { 1955 ret = ctlr->unprepare_message(ctlr, mesg); 1956 if (ret) { 1957 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1958 ret); 1959 } 1960 } 1961 1962 mesg->prepared = false; 1963 1964 WRITE_ONCE(ctlr->cur_msg_incomplete, false); 1965 smp_mb(); /* See __spi_pump_transfer_message()... */ 1966 if (READ_ONCE(ctlr->cur_msg_need_completion)) 1967 complete(&ctlr->cur_msg_completion); 1968 1969 trace_spi_message_done(mesg); 1970 1971 mesg->state = NULL; 1972 if (mesg->complete) 1973 mesg->complete(mesg->context); 1974 } 1975 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1976 1977 static int spi_start_queue(struct spi_controller *ctlr) 1978 { 1979 unsigned long flags; 1980 1981 spin_lock_irqsave(&ctlr->queue_lock, flags); 1982 1983 if (ctlr->running || ctlr->busy) { 1984 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1985 return -EBUSY; 1986 } 1987 1988 ctlr->running = true; 1989 ctlr->cur_msg = NULL; 1990 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1991 1992 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1993 1994 return 0; 1995 } 1996 1997 static int spi_stop_queue(struct spi_controller *ctlr) 1998 { 1999 unsigned long flags; 2000 unsigned limit = 500; 2001 int ret = 0; 2002 2003 spin_lock_irqsave(&ctlr->queue_lock, flags); 2004 2005 /* 2006 * This is a bit lame, but is optimized for the common execution path. 2007 * A wait_queue on the ctlr->busy could be used, but then the common 2008 * execution path (pump_messages) would be required to call wake_up or 2009 * friends on every SPI message. Do this instead. 2010 */ 2011 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 2012 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2013 usleep_range(10000, 11000); 2014 spin_lock_irqsave(&ctlr->queue_lock, flags); 2015 } 2016 2017 if (!list_empty(&ctlr->queue) || ctlr->busy) 2018 ret = -EBUSY; 2019 else 2020 ctlr->running = false; 2021 2022 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2023 2024 if (ret) { 2025 dev_warn(&ctlr->dev, "could not stop message queue\n"); 2026 return ret; 2027 } 2028 return ret; 2029 } 2030 2031 static int spi_destroy_queue(struct spi_controller *ctlr) 2032 { 2033 int ret; 2034 2035 ret = spi_stop_queue(ctlr); 2036 2037 /* 2038 * kthread_flush_worker will block until all work is done. 2039 * If the reason that stop_queue timed out is that the work will never 2040 * finish, then it does no good to call flush/stop thread, so 2041 * return anyway. 
2042 */ 2043 if (ret) { 2044 dev_err(&ctlr->dev, "problem destroying queue\n"); 2045 return ret; 2046 } 2047 2048 kthread_destroy_worker(ctlr->kworker); 2049 2050 return 0; 2051 } 2052 2053 static int __spi_queued_transfer(struct spi_device *spi, 2054 struct spi_message *msg, 2055 bool need_pump) 2056 { 2057 struct spi_controller *ctlr = spi->controller; 2058 unsigned long flags; 2059 2060 spin_lock_irqsave(&ctlr->queue_lock, flags); 2061 2062 if (!ctlr->running) { 2063 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2064 return -ESHUTDOWN; 2065 } 2066 msg->actual_length = 0; 2067 msg->status = -EINPROGRESS; 2068 2069 list_add_tail(&msg->queue, &ctlr->queue); 2070 ctlr->queue_empty = false; 2071 if (!ctlr->busy && need_pump) 2072 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2073 2074 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2075 return 0; 2076 } 2077 2078 /** 2079 * spi_queued_transfer - transfer function for queued transfers 2080 * @spi: spi device which is requesting transfer 2081 * @msg: spi message which is to handled is queued to driver queue 2082 * 2083 * Return: zero on success, else a negative error code. 2084 */ 2085 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2086 { 2087 return __spi_queued_transfer(spi, msg, true); 2088 } 2089 2090 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2091 { 2092 int ret; 2093 2094 ctlr->transfer = spi_queued_transfer; 2095 if (!ctlr->transfer_one_message) 2096 ctlr->transfer_one_message = spi_transfer_one_message; 2097 2098 /* Initialize and start queue */ 2099 ret = spi_init_queue(ctlr); 2100 if (ret) { 2101 dev_err(&ctlr->dev, "problem initializing queue\n"); 2102 goto err_init_queue; 2103 } 2104 ctlr->queued = true; 2105 ret = spi_start_queue(ctlr); 2106 if (ret) { 2107 dev_err(&ctlr->dev, "problem starting queue\n"); 2108 goto err_start_queue; 2109 } 2110 2111 return 0; 2112 2113 err_start_queue: 2114 spi_destroy_queue(ctlr); 2115 err_init_queue: 2116 return ret; 2117 } 2118 2119 /** 2120 * spi_flush_queue - Send all pending messages in the queue from the callers' 2121 * context 2122 * @ctlr: controller to process queue for 2123 * 2124 * This should be used when one wants to ensure all pending messages have been 2125 * sent before doing something. Is used by the spi-mem code to make sure SPI 2126 * memory operations do not preempt regular SPI transfers that have been queued 2127 * before the spi-mem operation. 2128 */ 2129 void spi_flush_queue(struct spi_controller *ctlr) 2130 { 2131 if (ctlr->transfer == spi_queued_transfer) 2132 __spi_pump_messages(ctlr, false); 2133 } 2134 2135 /*-------------------------------------------------------------------------*/ 2136 2137 #if defined(CONFIG_OF) 2138 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2139 struct device_node *nc) 2140 { 2141 u32 value; 2142 int rc; 2143 2144 /* Mode (clock phase/polarity/etc.) 
*/ 2145 if (of_property_read_bool(nc, "spi-cpha")) 2146 spi->mode |= SPI_CPHA; 2147 if (of_property_read_bool(nc, "spi-cpol")) 2148 spi->mode |= SPI_CPOL; 2149 if (of_property_read_bool(nc, "spi-3wire")) 2150 spi->mode |= SPI_3WIRE; 2151 if (of_property_read_bool(nc, "spi-lsb-first")) 2152 spi->mode |= SPI_LSB_FIRST; 2153 if (of_property_read_bool(nc, "spi-cs-high")) 2154 spi->mode |= SPI_CS_HIGH; 2155 2156 /* Device DUAL/QUAD mode */ 2157 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2158 switch (value) { 2159 case 0: 2160 spi->mode |= SPI_NO_TX; 2161 break; 2162 case 1: 2163 break; 2164 case 2: 2165 spi->mode |= SPI_TX_DUAL; 2166 break; 2167 case 4: 2168 spi->mode |= SPI_TX_QUAD; 2169 break; 2170 case 8: 2171 spi->mode |= SPI_TX_OCTAL; 2172 break; 2173 default: 2174 dev_warn(&ctlr->dev, 2175 "spi-tx-bus-width %d not supported\n", 2176 value); 2177 break; 2178 } 2179 } 2180 2181 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2182 switch (value) { 2183 case 0: 2184 spi->mode |= SPI_NO_RX; 2185 break; 2186 case 1: 2187 break; 2188 case 2: 2189 spi->mode |= SPI_RX_DUAL; 2190 break; 2191 case 4: 2192 spi->mode |= SPI_RX_QUAD; 2193 break; 2194 case 8: 2195 spi->mode |= SPI_RX_OCTAL; 2196 break; 2197 default: 2198 dev_warn(&ctlr->dev, 2199 "spi-rx-bus-width %d not supported\n", 2200 value); 2201 break; 2202 } 2203 } 2204 2205 if (spi_controller_is_slave(ctlr)) { 2206 if (!of_node_name_eq(nc, "slave")) { 2207 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2208 nc); 2209 return -EINVAL; 2210 } 2211 return 0; 2212 } 2213 2214 /* Device address */ 2215 rc = of_property_read_u32(nc, "reg", &value); 2216 if (rc) { 2217 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2218 nc, rc); 2219 return rc; 2220 } 2221 spi->chip_select = value; 2222 2223 /* Device speed */ 2224 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2225 spi->max_speed_hz = value; 2226 2227 return 0; 2228 } 2229 2230 static struct spi_device * 2231 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2232 { 2233 struct spi_device *spi; 2234 int rc; 2235 2236 /* Alloc an spi_device */ 2237 spi = spi_alloc_device(ctlr); 2238 if (!spi) { 2239 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2240 rc = -ENOMEM; 2241 goto err_out; 2242 } 2243 2244 /* Select device driver */ 2245 rc = of_modalias_node(nc, spi->modalias, 2246 sizeof(spi->modalias)); 2247 if (rc < 0) { 2248 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2249 goto err_out; 2250 } 2251 2252 rc = of_spi_parse_dt(ctlr, spi, nc); 2253 if (rc) 2254 goto err_out; 2255 2256 /* Store a pointer to the node in the device structure */ 2257 of_node_get(nc); 2258 spi->dev.of_node = nc; 2259 spi->dev.fwnode = of_fwnode_handle(nc); 2260 2261 /* Register the new device */ 2262 rc = spi_add_device(spi); 2263 if (rc) { 2264 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2265 goto err_of_node_put; 2266 } 2267 2268 return spi; 2269 2270 err_of_node_put: 2271 of_node_put(nc); 2272 err_out: 2273 spi_dev_put(spi); 2274 return ERR_PTR(rc); 2275 } 2276 2277 /** 2278 * of_register_spi_devices() - Register child devices onto the SPI bus 2279 * @ctlr: Pointer to spi_controller device 2280 * 2281 * Registers an spi_device for each child node of controller node which 2282 * represents a valid SPI slave. 
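 *
 * A typical child node uses the standard SPI bindings parsed above, for
 * example (illustrative devicetree fragment only):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *	};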
2283 */ 2284 static void of_register_spi_devices(struct spi_controller *ctlr) 2285 { 2286 struct spi_device *spi; 2287 struct device_node *nc; 2288 2289 if (!ctlr->dev.of_node) 2290 return; 2291 2292 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2293 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2294 continue; 2295 spi = of_register_spi_device(ctlr, nc); 2296 if (IS_ERR(spi)) { 2297 dev_warn(&ctlr->dev, 2298 "Failed to create SPI device for %pOF\n", nc); 2299 of_node_clear_flag(nc, OF_POPULATED); 2300 } 2301 } 2302 } 2303 #else 2304 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2305 #endif 2306 2307 /** 2308 * spi_new_ancillary_device() - Register ancillary SPI device 2309 * @spi: Pointer to the main SPI device registering the ancillary device 2310 * @chip_select: Chip Select of the ancillary device 2311 * 2312 * Register an ancillary SPI device; for example some chips have a chip-select 2313 * for normal device usage and another one for setup/firmware upload. 2314 * 2315 * This may only be called from main SPI device's probe routine. 2316 * 2317 * Return: 0 on success; negative errno on failure 2318 */ 2319 struct spi_device *spi_new_ancillary_device(struct spi_device *spi, 2320 u8 chip_select) 2321 { 2322 struct spi_device *ancillary; 2323 int rc = 0; 2324 2325 /* Alloc an spi_device */ 2326 ancillary = spi_alloc_device(spi->controller); 2327 if (!ancillary) { 2328 rc = -ENOMEM; 2329 goto err_out; 2330 } 2331 2332 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); 2333 2334 /* Use provided chip-select for ancillary device */ 2335 ancillary->chip_select = chip_select; 2336 2337 /* Take over SPI mode/speed from SPI main device */ 2338 ancillary->max_speed_hz = spi->max_speed_hz; 2339 ancillary->mode = spi->mode; 2340 2341 /* Register the new device */ 2342 rc = spi_add_device_locked(ancillary); 2343 if (rc) { 2344 dev_err(&spi->dev, "failed to register ancillary device\n"); 2345 goto err_out; 2346 } 2347 2348 return ancillary; 2349 2350 err_out: 2351 spi_dev_put(ancillary); 2352 return ERR_PTR(rc); 2353 } 2354 EXPORT_SYMBOL_GPL(spi_new_ancillary_device); 2355 2356 #ifdef CONFIG_ACPI 2357 struct acpi_spi_lookup { 2358 struct spi_controller *ctlr; 2359 u32 max_speed_hz; 2360 u32 mode; 2361 int irq; 2362 u8 bits_per_word; 2363 u8 chip_select; 2364 int n; 2365 int index; 2366 }; 2367 2368 static int acpi_spi_count(struct acpi_resource *ares, void *data) 2369 { 2370 struct acpi_resource_spi_serialbus *sb; 2371 int *count = data; 2372 2373 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) 2374 return 1; 2375 2376 sb = &ares->data.spi_serial_bus; 2377 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI) 2378 return 1; 2379 2380 *count = *count + 1; 2381 2382 return 1; 2383 } 2384 2385 /** 2386 * acpi_spi_count_resources - Count the number of SpiSerialBus resources 2387 * @adev: ACPI device 2388 * 2389 * Returns the number of SpiSerialBus resources in the ACPI-device's 2390 * resource-list; or a negative error code. 
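 *
 * A caller can combine it with acpi_spi_device_alloc() roughly as follows
 * (illustrative sketch, error handling trimmed):
 *
 *	nr = acpi_spi_count_resources(adev);
 *	for (i = 0; i < nr; i++) {
 *		spi = acpi_spi_device_alloc(NULL, adev, i);
 *		if (!IS_ERR(spi))
 *			spi_add_device(spi);
 *	}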
2391 */ 2392 int acpi_spi_count_resources(struct acpi_device *adev) 2393 { 2394 LIST_HEAD(r); 2395 int count = 0; 2396 int ret; 2397 2398 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); 2399 if (ret < 0) 2400 return ret; 2401 2402 acpi_dev_free_resource_list(&r); 2403 2404 return count; 2405 } 2406 EXPORT_SYMBOL_GPL(acpi_spi_count_resources); 2407 2408 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2409 struct acpi_spi_lookup *lookup) 2410 { 2411 const union acpi_object *obj; 2412 2413 if (!x86_apple_machine) 2414 return; 2415 2416 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2417 && obj->buffer.length >= 4) 2418 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2419 2420 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2421 && obj->buffer.length == 8) 2422 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2423 2424 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2425 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2426 lookup->mode |= SPI_LSB_FIRST; 2427 2428 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2429 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2430 lookup->mode |= SPI_CPOL; 2431 2432 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2433 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2434 lookup->mode |= SPI_CPHA; 2435 } 2436 2437 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); 2438 2439 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2440 { 2441 struct acpi_spi_lookup *lookup = data; 2442 struct spi_controller *ctlr = lookup->ctlr; 2443 2444 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2445 struct acpi_resource_spi_serialbus *sb; 2446 acpi_handle parent_handle; 2447 acpi_status status; 2448 2449 sb = &ares->data.spi_serial_bus; 2450 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2451 2452 if (lookup->index != -1 && lookup->n++ != lookup->index) 2453 return 1; 2454 2455 status = acpi_get_handle(NULL, 2456 sb->resource_source.string_ptr, 2457 &parent_handle); 2458 2459 if (ACPI_FAILURE(status)) 2460 return -ENODEV; 2461 2462 if (ctlr) { 2463 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2464 return -ENODEV; 2465 } else { 2466 struct acpi_device *adev; 2467 2468 adev = acpi_fetch_acpi_dev(parent_handle); 2469 if (!adev) 2470 return -ENODEV; 2471 2472 ctlr = acpi_spi_find_controller_by_adev(adev); 2473 if (!ctlr) 2474 return -EPROBE_DEFER; 2475 2476 lookup->ctlr = ctlr; 2477 } 2478 2479 /* 2480 * ACPI DeviceSelection numbering is handled by the 2481 * host controller driver in Windows and can vary 2482 * from driver to driver. In Linux we always expect 2483 * 0 .. max - 1 so we need to ask the driver to 2484 * translate between the two schemes. 
*/ 2486 if (ctlr->fw_translate_cs) { 2487 int cs = ctlr->fw_translate_cs(ctlr, 2488 sb->device_selection); 2489 if (cs < 0) 2490 return cs; 2491 lookup->chip_select = cs; 2492 } else { 2493 lookup->chip_select = sb->device_selection; 2494 } 2495 2496 lookup->max_speed_hz = sb->connection_speed; 2497 lookup->bits_per_word = sb->data_bit_length; 2498 2499 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2500 lookup->mode |= SPI_CPHA; 2501 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2502 lookup->mode |= SPI_CPOL; 2503 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2504 lookup->mode |= SPI_CS_HIGH; 2505 } 2506 } else if (lookup->irq < 0) { 2507 struct resource r; 2508 2509 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2510 lookup->irq = r.start; 2511 } 2512 2513 /* Always tell the ACPI core to skip this resource */ 2514 return 1; 2515 } 2516 2517 /** 2518 * acpi_spi_device_alloc - Allocate an SPI device, and fill it in with ACPI information 2519 * @ctlr: controller to which the spi device belongs 2520 * @adev: ACPI Device for the spi device 2521 * @index: Index of the spi resource inside the ACPI Node 2522 * 2523 * This should be used to allocate a new spi device from an ACPI Node. 2524 * The caller is responsible for calling spi_add_device to register the spi device. 2525 * 2526 * If ctlr is set to NULL, the controller for the spi device will be looked up 2527 * using the resource. 2528 * If index is set to -1, index is not used. 2529 * Note: If index is -1, ctlr must be set. 2530 * 2531 * Return: a pointer to the new device, or ERR_PTR on error. 2532 */ 2533 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, 2534 struct acpi_device *adev, 2535 int index) 2536 { 2537 acpi_handle parent_handle = NULL; 2538 struct list_head resource_list; 2539 struct acpi_spi_lookup lookup = {}; 2540 struct spi_device *spi; 2541 int ret; 2542 2543 if (!ctlr && index == -1) 2544 return ERR_PTR(-EINVAL); 2545 2546 lookup.ctlr = ctlr; 2547 lookup.irq = -1; 2548 lookup.index = index; 2549 lookup.n = 0; 2550 2551 INIT_LIST_HEAD(&resource_list); 2552 ret = acpi_dev_get_resources(adev, &resource_list, 2553 acpi_spi_add_resource, &lookup); 2554 acpi_dev_free_resource_list(&resource_list); 2555 2556 if (ret < 0) 2557 /* Found SPI in _CRS but it points to another controller */ 2558 return ERR_PTR(ret); 2559 2560 if (!lookup.max_speed_hz && 2561 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2562 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) { 2563 /* Apple does not use _CRS but nested devices for SPI slaves */ 2564 acpi_spi_parse_apple_properties(adev, &lookup); 2565 } 2566 2567 if (!lookup.max_speed_hz) 2568 return ERR_PTR(-ENODEV); 2569 2570 spi = spi_alloc_device(lookup.ctlr); 2571 if (!spi) { 2572 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n", 2573 dev_name(&adev->dev)); 2574 return ERR_PTR(-ENOMEM); 2575 } 2576 2577 ACPI_COMPANION_SET(&spi->dev, adev); 2578 spi->max_speed_hz = lookup.max_speed_hz; 2579 spi->mode |= lookup.mode; 2580 spi->irq = lookup.irq; 2581 spi->bits_per_word = lookup.bits_per_word; 2582 spi->chip_select = lookup.chip_select; 2583 2584 return spi; 2585 } 2586 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); 2587 2588 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2589 struct acpi_device *adev) 2590 { 2591 struct spi_device *spi; 2592 2593 if (acpi_bus_get_status(adev) || !adev->status.present || 2594 acpi_device_enumerated(adev)) 2595 return AE_OK; 2596 2597 spi = acpi_spi_device_alloc(ctlr, adev, -1);
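	/*
	 * Only a memory shortage is reported as a fatal ACPI status below;
	 * any other failure (e.g. the _CRS entry points at a different
	 * controller) simply leaves this device unregistered.
	 */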
2598 if (IS_ERR(spi)) { 2599 if (PTR_ERR(spi) == -ENOMEM) 2600 return AE_NO_MEMORY; 2601 else 2602 return AE_OK; 2603 } 2604 2605 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2606 sizeof(spi->modalias)); 2607 2608 if (spi->irq < 0) 2609 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2610 2611 acpi_device_set_enumerated(adev); 2612 2613 adev->power.flags.ignore_parent = true; 2614 if (spi_add_device(spi)) { 2615 adev->power.flags.ignore_parent = false; 2616 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2617 dev_name(&adev->dev)); 2618 spi_dev_put(spi); 2619 } 2620 2621 return AE_OK; 2622 } 2623 2624 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2625 void *data, void **return_value) 2626 { 2627 struct acpi_device *adev = acpi_fetch_acpi_dev(handle); 2628 struct spi_controller *ctlr = data; 2629 2630 if (!adev) 2631 return AE_OK; 2632 2633 return acpi_register_spi_device(ctlr, adev); 2634 } 2635 2636 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2637 2638 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2639 { 2640 acpi_status status; 2641 acpi_handle handle; 2642 2643 handle = ACPI_HANDLE(ctlr->dev.parent); 2644 if (!handle) 2645 return; 2646 2647 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2648 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2649 acpi_spi_add_device, NULL, ctlr, NULL); 2650 if (ACPI_FAILURE(status)) 2651 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2652 } 2653 #else 2654 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2655 #endif /* CONFIG_ACPI */ 2656 2657 static void spi_controller_release(struct device *dev) 2658 { 2659 struct spi_controller *ctlr; 2660 2661 ctlr = container_of(dev, struct spi_controller, dev); 2662 kfree(ctlr); 2663 } 2664 2665 static struct class spi_master_class = { 2666 .name = "spi_master", 2667 .owner = THIS_MODULE, 2668 .dev_release = spi_controller_release, 2669 .dev_groups = spi_master_groups, 2670 }; 2671 2672 #ifdef CONFIG_SPI_SLAVE 2673 /** 2674 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2675 * controller 2676 * @spi: device used for the current transfer 2677 */ 2678 int spi_slave_abort(struct spi_device *spi) 2679 { 2680 struct spi_controller *ctlr = spi->controller; 2681 2682 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2683 return ctlr->slave_abort(ctlr); 2684 2685 return -ENOTSUPP; 2686 } 2687 EXPORT_SYMBOL_GPL(spi_slave_abort); 2688 2689 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2690 char *buf) 2691 { 2692 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2693 dev); 2694 struct device *child; 2695 2696 child = device_find_any_child(&ctlr->dev); 2697 return sprintf(buf, "%s\n", 2698 child ? 
to_spi_device(child)->modalias : NULL); 2699 } 2700 2701 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2702 const char *buf, size_t count) 2703 { 2704 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2705 dev); 2706 struct spi_device *spi; 2707 struct device *child; 2708 char name[32]; 2709 int rc; 2710 2711 rc = sscanf(buf, "%31s", name); 2712 if (rc != 1 || !name[0]) 2713 return -EINVAL; 2714 2715 child = device_find_any_child(&ctlr->dev); 2716 if (child) { 2717 /* Remove registered slave */ 2718 device_unregister(child); 2719 put_device(child); 2720 } 2721 2722 if (strcmp(name, "(null)")) { 2723 /* Register new slave */ 2724 spi = spi_alloc_device(ctlr); 2725 if (!spi) 2726 return -ENOMEM; 2727 2728 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2729 2730 rc = spi_add_device(spi); 2731 if (rc) { 2732 spi_dev_put(spi); 2733 return rc; 2734 } 2735 } 2736 2737 return count; 2738 } 2739 2740 static DEVICE_ATTR_RW(slave); 2741 2742 static struct attribute *spi_slave_attrs[] = { 2743 &dev_attr_slave.attr, 2744 NULL, 2745 }; 2746 2747 static const struct attribute_group spi_slave_group = { 2748 .attrs = spi_slave_attrs, 2749 }; 2750 2751 static const struct attribute_group *spi_slave_groups[] = { 2752 &spi_controller_statistics_group, 2753 &spi_slave_group, 2754 NULL, 2755 }; 2756 2757 static struct class spi_slave_class = { 2758 .name = "spi_slave", 2759 .owner = THIS_MODULE, 2760 .dev_release = spi_controller_release, 2761 .dev_groups = spi_slave_groups, 2762 }; 2763 #else 2764 extern struct class spi_slave_class; /* dummy */ 2765 #endif 2766 2767 /** 2768 * __spi_alloc_controller - allocate an SPI master or slave controller 2769 * @dev: the controller, possibly using the platform_bus 2770 * @size: how much zeroed driver-private data to allocate; the pointer to this 2771 * memory is in the driver_data field of the returned device, accessible 2772 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2773 * drivers granting DMA access to portions of their private data need to 2774 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2775 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2776 * slave (true) controller 2777 * Context: can sleep 2778 * 2779 * This call is used only by SPI controller drivers, which are the 2780 * only ones directly touching chip registers. It's how they allocate 2781 * an spi_controller structure, prior to calling spi_register_controller(). 2782 * 2783 * This must be called from context that can sleep. 2784 * 2785 * The caller is responsible for assigning the bus number and initializing the 2786 * controller's methods before calling spi_register_controller(); and (after 2787 * errors adding the device) calling spi_controller_put() to prevent a memory 2788 * leak. 2789 * 2790 * Return: the SPI controller structure on success, else NULL. 
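 *
 * A minimal usage sketch (illustrative only; the foo_* names and the private
 * struct are hypothetical, and most controller fields are omitted):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 2;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);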
2791 */ 2792 struct spi_controller *__spi_alloc_controller(struct device *dev, 2793 unsigned int size, bool slave) 2794 { 2795 struct spi_controller *ctlr; 2796 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2797 2798 if (!dev) 2799 return NULL; 2800 2801 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2802 if (!ctlr) 2803 return NULL; 2804 2805 device_initialize(&ctlr->dev); 2806 INIT_LIST_HEAD(&ctlr->queue); 2807 spin_lock_init(&ctlr->queue_lock); 2808 spin_lock_init(&ctlr->bus_lock_spinlock); 2809 mutex_init(&ctlr->bus_lock_mutex); 2810 mutex_init(&ctlr->io_mutex); 2811 mutex_init(&ctlr->add_lock); 2812 ctlr->bus_num = -1; 2813 ctlr->num_chipselect = 1; 2814 ctlr->slave = slave; 2815 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2816 ctlr->dev.class = &spi_slave_class; 2817 else 2818 ctlr->dev.class = &spi_master_class; 2819 ctlr->dev.parent = dev; 2820 pm_suspend_ignore_children(&ctlr->dev, true); 2821 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2822 2823 return ctlr; 2824 } 2825 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2826 2827 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2828 { 2829 spi_controller_put(*(struct spi_controller **)ctlr); 2830 } 2831 2832 /** 2833 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2834 * @dev: physical device of SPI controller 2835 * @size: how much zeroed driver-private data to allocate 2836 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2837 * Context: can sleep 2838 * 2839 * Allocate an SPI controller and automatically release a reference on it 2840 * when @dev is unbound from its driver. Drivers are thus relieved from 2841 * having to call spi_controller_put(). 2842 * 2843 * The arguments to this function are identical to __spi_alloc_controller(). 2844 * 2845 * Return: the SPI controller structure on success, else NULL. 2846 */ 2847 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2848 unsigned int size, 2849 bool slave) 2850 { 2851 struct spi_controller **ptr, *ctlr; 2852 2853 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2854 GFP_KERNEL); 2855 if (!ptr) 2856 return NULL; 2857 2858 ctlr = __spi_alloc_controller(dev, size, slave); 2859 if (ctlr) { 2860 ctlr->devm_allocated = true; 2861 *ptr = ctlr; 2862 devres_add(dev, ptr); 2863 } else { 2864 devres_free(ptr); 2865 } 2866 2867 return ctlr; 2868 } 2869 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2870 2871 /** 2872 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2873 * @ctlr: The SPI master to grab GPIO descriptors for 2874 */ 2875 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2876 { 2877 int nb, i; 2878 struct gpio_desc **cs; 2879 struct device *dev = &ctlr->dev; 2880 unsigned long native_cs_mask = 0; 2881 unsigned int num_cs_gpios = 0; 2882 2883 nb = gpiod_count(dev, "cs"); 2884 if (nb < 0) { 2885 /* No GPIOs at all is fine, else return the error */ 2886 if (nb == -ENOENT) 2887 return 0; 2888 return nb; 2889 } 2890 2891 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2892 2893 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2894 GFP_KERNEL); 2895 if (!cs) 2896 return -ENOMEM; 2897 ctlr->cs_gpiods = cs; 2898 2899 for (i = 0; i < nb; i++) { 2900 /* 2901 * Most chipselects are active low, the inverted 2902 * semantics are handled by special quirks in gpiolib, 2903 * so initializing them GPIOD_OUT_LOW here means 2904 * "unasserted", in most cases this will drive the physical 2905 * line high. 
2906 */ 2907 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2908 GPIOD_OUT_LOW); 2909 if (IS_ERR(cs[i])) 2910 return PTR_ERR(cs[i]); 2911 2912 if (cs[i]) { 2913 /* 2914 * If we find a CS GPIO, name it after the device and 2915 * chip select line. 2916 */ 2917 char *gpioname; 2918 2919 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2920 dev_name(dev), i); 2921 if (!gpioname) 2922 return -ENOMEM; 2923 gpiod_set_consumer_name(cs[i], gpioname); 2924 num_cs_gpios++; 2925 continue; 2926 } 2927 2928 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2929 dev_err(dev, "Invalid native chip select %d\n", i); 2930 return -EINVAL; 2931 } 2932 native_cs_mask |= BIT(i); 2933 } 2934 2935 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 2936 2937 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 2938 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 2939 dev_err(dev, "No unused native chip select available\n"); 2940 return -EINVAL; 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int spi_controller_check_ops(struct spi_controller *ctlr) 2947 { 2948 /* 2949 * The controller may implement only the high-level SPI-memory like 2950 * operations if it does not support regular SPI transfers, and this is 2951 * valid use case. 2952 * If ->mem_ops is NULL, we request that at least one of the 2953 * ->transfer_xxx() method be implemented. 2954 */ 2955 if (ctlr->mem_ops) { 2956 if (!ctlr->mem_ops->exec_op) 2957 return -EINVAL; 2958 } else if (!ctlr->transfer && !ctlr->transfer_one && 2959 !ctlr->transfer_one_message) { 2960 return -EINVAL; 2961 } 2962 2963 return 0; 2964 } 2965 2966 /** 2967 * spi_register_controller - register SPI master or slave controller 2968 * @ctlr: initialized master, originally from spi_alloc_master() or 2969 * spi_alloc_slave() 2970 * Context: can sleep 2971 * 2972 * SPI controllers connect to their drivers using some non-SPI bus, 2973 * such as the platform bus. The final stage of probe() in that code 2974 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2975 * 2976 * SPI controllers use board specific (often SOC specific) bus numbers, 2977 * and board-specific addressing for SPI devices combines those numbers 2978 * with chip select numbers. Since SPI does not directly support dynamic 2979 * device identification, boards need configuration tables telling which 2980 * chip is at which address. 2981 * 2982 * This must be called from context that can sleep. It returns zero on 2983 * success, else a negative error code (dropping the controller's refcount). 2984 * After a successful return, the caller is responsible for calling 2985 * spi_unregister_controller(). 2986 * 2987 * Return: zero on success, else a negative error code. 2988 */ 2989 int spi_register_controller(struct spi_controller *ctlr) 2990 { 2991 struct device *dev = ctlr->dev.parent; 2992 struct boardinfo *bi; 2993 int status; 2994 int id, first_dynamic; 2995 2996 if (!dev) 2997 return -ENODEV; 2998 2999 /* 3000 * Make sure all necessary hooks are implemented before registering 3001 * the SPI controller. 3002 */ 3003 status = spi_controller_check_ops(ctlr); 3004 if (status) 3005 return status; 3006 3007 if (ctlr->bus_num >= 0) { 3008 /* Devices with a fixed bus num must check-in with the num */ 3009 mutex_lock(&board_lock); 3010 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3011 ctlr->bus_num + 1, GFP_KERNEL); 3012 mutex_unlock(&board_lock); 3013 if (WARN(id < 0, "couldn't get idr")) 3014 return id == -ENOSPC ? 
-EBUSY : id; 3015 ctlr->bus_num = id; 3016 } else if (ctlr->dev.of_node) { 3017 /* Allocate dynamic bus number using Linux idr */ 3018 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 3019 if (id >= 0) { 3020 ctlr->bus_num = id; 3021 mutex_lock(&board_lock); 3022 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3023 ctlr->bus_num + 1, GFP_KERNEL); 3024 mutex_unlock(&board_lock); 3025 if (WARN(id < 0, "couldn't get idr")) 3026 return id == -ENOSPC ? -EBUSY : id; 3027 } 3028 } 3029 if (ctlr->bus_num < 0) { 3030 first_dynamic = of_alias_get_highest_id("spi"); 3031 if (first_dynamic < 0) 3032 first_dynamic = 0; 3033 else 3034 first_dynamic++; 3035 3036 mutex_lock(&board_lock); 3037 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 3038 0, GFP_KERNEL); 3039 mutex_unlock(&board_lock); 3040 if (WARN(id < 0, "couldn't get idr")) 3041 return id; 3042 ctlr->bus_num = id; 3043 } 3044 ctlr->bus_lock_flag = 0; 3045 init_completion(&ctlr->xfer_completion); 3046 init_completion(&ctlr->cur_msg_completion); 3047 if (!ctlr->max_dma_len) 3048 ctlr->max_dma_len = INT_MAX; 3049 3050 /* 3051 * Register the device, then userspace will see it. 3052 * Registration fails if the bus ID is in use. 3053 */ 3054 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 3055 3056 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { 3057 status = spi_get_gpio_descs(ctlr); 3058 if (status) 3059 goto free_bus_id; 3060 /* 3061 * A controller using GPIO descriptors always 3062 * supports SPI_CS_HIGH if need be. 3063 */ 3064 ctlr->mode_bits |= SPI_CS_HIGH; 3065 } 3066 3067 /* 3068 * Even if it's just one always-selected device, there must 3069 * be at least one chipselect. 3070 */ 3071 if (!ctlr->num_chipselect) { 3072 status = -EINVAL; 3073 goto free_bus_id; 3074 } 3075 3076 /* Setting last_cs to -1 means no chip selected */ 3077 ctlr->last_cs = -1; 3078 3079 status = device_add(&ctlr->dev); 3080 if (status < 0) 3081 goto free_bus_id; 3082 dev_dbg(dev, "registered %s %s\n", 3083 spi_controller_is_slave(ctlr) ? "slave" : "master", 3084 dev_name(&ctlr->dev)); 3085 3086 /* 3087 * If we're using a queued driver, start the queue. Note that we don't 3088 * need the queueing logic if the driver is only supporting high-level 3089 * memory operations. 
3090 */ 3091 if (ctlr->transfer) { 3092 dev_info(dev, "controller is unqueued, this is deprecated\n"); 3093 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 3094 status = spi_controller_initialize_queue(ctlr); 3095 if (status) { 3096 device_del(&ctlr->dev); 3097 goto free_bus_id; 3098 } 3099 } 3100 /* Add statistics */ 3101 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); 3102 if (!ctlr->pcpu_statistics) { 3103 dev_err(dev, "Error allocating per-cpu statistics\n"); 3104 status = -ENOMEM; 3105 goto destroy_queue; 3106 } 3107 3108 mutex_lock(&board_lock); 3109 list_add_tail(&ctlr->list, &spi_controller_list); 3110 list_for_each_entry(bi, &board_list, list) 3111 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 3112 mutex_unlock(&board_lock); 3113 3114 /* Register devices from the device tree and ACPI */ 3115 of_register_spi_devices(ctlr); 3116 acpi_register_spi_devices(ctlr); 3117 return status; 3118 3119 destroy_queue: 3120 spi_destroy_queue(ctlr); 3121 free_bus_id: 3122 mutex_lock(&board_lock); 3123 idr_remove(&spi_master_idr, ctlr->bus_num); 3124 mutex_unlock(&board_lock); 3125 return status; 3126 } 3127 EXPORT_SYMBOL_GPL(spi_register_controller); 3128 3129 static void devm_spi_unregister(struct device *dev, void *res) 3130 { 3131 spi_unregister_controller(*(struct spi_controller **)res); 3132 } 3133 3134 /** 3135 * devm_spi_register_controller - register managed SPI master or slave 3136 * controller 3137 * @dev: device managing SPI controller 3138 * @ctlr: initialized controller, originally from spi_alloc_master() or 3139 * spi_alloc_slave() 3140 * Context: can sleep 3141 * 3142 * Register an SPI controller as with spi_register_controller(); the 3143 * controller is automatically unregistered and freed when @dev is unbound. 3144 * 3145 * Return: zero on success, else a negative error code. 3146 */ 3147 int devm_spi_register_controller(struct device *dev, 3148 struct spi_controller *ctlr) 3149 { 3150 struct spi_controller **ptr; 3151 int ret; 3152 3153 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 3154 if (!ptr) 3155 return -ENOMEM; 3156 3157 ret = spi_register_controller(ctlr); 3158 if (!ret) { 3159 *ptr = ctlr; 3160 devres_add(dev, ptr); 3161 } else { 3162 devres_free(ptr); 3163 } 3164 3165 return ret; 3166 } 3167 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 3168 3169 static int __unregister(struct device *dev, void *null) 3170 { 3171 spi_unregister_device(to_spi_device(dev)); 3172 return 0; 3173 } 3174 3175 /** 3176 * spi_unregister_controller - unregister SPI master or slave controller 3177 * @ctlr: the controller being unregistered 3178 * Context: can sleep 3179 * 3180 * This call is used only by SPI controller drivers, which are the 3181 * only ones directly touching chip registers. 3182 * 3183 * This must be called from context that can sleep. 3184 * 3185 * Note that this function also drops a reference to the controller.
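 *
 * For a driver that registered the controller itself this is typically the
 * whole remove path (illustrative):
 *
 *	spi_unregister_controller(ctlr);
 *
 * Controllers registered through devm_spi_register_controller() are
 * unregistered automatically and normally must not be passed here again.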
3186 */ 3187 void spi_unregister_controller(struct spi_controller *ctlr) 3188 { 3189 struct spi_controller *found; 3190 int id = ctlr->bus_num; 3191 3192 /* Prevent addition of new devices, unregister existing ones */ 3193 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3194 mutex_lock(&ctlr->add_lock); 3195 3196 device_for_each_child(&ctlr->dev, NULL, __unregister); 3197 3198 /* First make sure that this controller was ever added */ 3199 mutex_lock(&board_lock); 3200 found = idr_find(&spi_master_idr, id); 3201 mutex_unlock(&board_lock); 3202 if (ctlr->queued) { 3203 if (spi_destroy_queue(ctlr)) 3204 dev_err(&ctlr->dev, "queue remove failed\n"); 3205 } 3206 mutex_lock(&board_lock); 3207 list_del(&ctlr->list); 3208 mutex_unlock(&board_lock); 3209 3210 device_del(&ctlr->dev); 3211 3212 /* Free bus id */ 3213 mutex_lock(&board_lock); 3214 if (found == ctlr) 3215 idr_remove(&spi_master_idr, id); 3216 mutex_unlock(&board_lock); 3217 3218 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3219 mutex_unlock(&ctlr->add_lock); 3220 3221 /* Release the last reference on the controller if its driver 3222 * has not yet been converted to devm_spi_alloc_master/slave(). 3223 */ 3224 if (!ctlr->devm_allocated) 3225 put_device(&ctlr->dev); 3226 } 3227 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3228 3229 int spi_controller_suspend(struct spi_controller *ctlr) 3230 { 3231 int ret; 3232 3233 /* Basically no-ops for non-queued controllers */ 3234 if (!ctlr->queued) 3235 return 0; 3236 3237 ret = spi_stop_queue(ctlr); 3238 if (ret) 3239 dev_err(&ctlr->dev, "queue stop failed\n"); 3240 3241 return ret; 3242 } 3243 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3244 3245 int spi_controller_resume(struct spi_controller *ctlr) 3246 { 3247 int ret; 3248 3249 if (!ctlr->queued) 3250 return 0; 3251 3252 ret = spi_start_queue(ctlr); 3253 if (ret) 3254 dev_err(&ctlr->dev, "queue restart failed\n"); 3255 3256 return ret; 3257 } 3258 EXPORT_SYMBOL_GPL(spi_controller_resume); 3259 3260 /*-------------------------------------------------------------------------*/ 3261 3262 /* Core methods for spi_message alterations */ 3263 3264 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3265 struct spi_message *msg, 3266 void *res) 3267 { 3268 struct spi_replaced_transfers *rxfer = res; 3269 size_t i; 3270 3271 /* Call extra callback if requested */ 3272 if (rxfer->release) 3273 rxfer->release(ctlr, msg, res); 3274 3275 /* Insert replaced transfers back into the message */ 3276 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3277 3278 /* Remove the formerly inserted entries */ 3279 for (i = 0; i < rxfer->inserted; i++) 3280 list_del(&rxfer->inserted_transfers[i].transfer_list); 3281 } 3282 3283 /** 3284 * spi_replace_transfers - replace transfers with several transfers 3285 * and register change with spi_message.resources 3286 * @msg: the spi_message we work upon 3287 * @xfer_first: the first spi_transfer we want to replace 3288 * @remove: number of transfers to remove 3289 * @insert: the number of transfers we want to insert instead 3290 * @release: extra release code necessary in some circumstances 3291 * @extradatasize: extra data to allocate (with alignment guarantees 3292 * of struct @spi_transfer) 3293 * @gfp: gfp flags 3294 * 3295 * Returns: pointer to @spi_replaced_transfers, 3296 * PTR_ERR(...) in case of errors. 
3297 */ 3298 static struct spi_replaced_transfers *spi_replace_transfers( 3299 struct spi_message *msg, 3300 struct spi_transfer *xfer_first, 3301 size_t remove, 3302 size_t insert, 3303 spi_replaced_release_t release, 3304 size_t extradatasize, 3305 gfp_t gfp) 3306 { 3307 struct spi_replaced_transfers *rxfer; 3308 struct spi_transfer *xfer; 3309 size_t i; 3310 3311 /* Allocate the structure using spi_res */ 3312 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3313 struct_size(rxfer, inserted_transfers, insert) 3314 + extradatasize, 3315 gfp); 3316 if (!rxfer) 3317 return ERR_PTR(-ENOMEM); 3318 3319 /* The release code to invoke before running the generic release */ 3320 rxfer->release = release; 3321 3322 /* Assign extradata */ 3323 if (extradatasize) 3324 rxfer->extradata = 3325 &rxfer->inserted_transfers[insert]; 3326 3327 /* Init the replaced_transfers list */ 3328 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3329 3330 /* 3331 * Assign the list_entry after which we should reinsert 3332 * the @replaced_transfers - it may be spi_message.messages! 3333 */ 3334 rxfer->replaced_after = xfer_first->transfer_list.prev; 3335 3336 /* Remove the requested number of transfers */ 3337 for (i = 0; i < remove; i++) { 3338 /* 3339 * If the entry after replaced_after it is msg->transfers 3340 * then we have been requested to remove more transfers 3341 * than are in the list. 3342 */ 3343 if (rxfer->replaced_after->next == &msg->transfers) { 3344 dev_err(&msg->spi->dev, 3345 "requested to remove more spi_transfers than are available\n"); 3346 /* Insert replaced transfers back into the message */ 3347 list_splice(&rxfer->replaced_transfers, 3348 rxfer->replaced_after); 3349 3350 /* Free the spi_replace_transfer structure... */ 3351 spi_res_free(rxfer); 3352 3353 /* ...and return with an error */ 3354 return ERR_PTR(-EINVAL); 3355 } 3356 3357 /* 3358 * Remove the entry after replaced_after from list of 3359 * transfers and add it to list of replaced_transfers. 3360 */ 3361 list_move_tail(rxfer->replaced_after->next, 3362 &rxfer->replaced_transfers); 3363 } 3364 3365 /* 3366 * Create copy of the given xfer with identical settings 3367 * based on the first transfer to get removed. 3368 */ 3369 for (i = 0; i < insert; i++) { 3370 /* We need to run in reverse order */ 3371 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3372 3373 /* Copy all spi_transfer data */ 3374 memcpy(xfer, xfer_first, sizeof(*xfer)); 3375 3376 /* Add to list */ 3377 list_add(&xfer->transfer_list, rxfer->replaced_after); 3378 3379 /* Clear cs_change and delay for all but the last */ 3380 if (i) { 3381 xfer->cs_change = false; 3382 xfer->delay.value = 0; 3383 } 3384 } 3385 3386 /* Set up inserted... 
*/ 3387 rxfer->inserted = insert; 3388 3389 /* ...and register it with spi_res/spi_message */ 3390 spi_res_add(msg, rxfer); 3391 3392 return rxfer; 3393 } 3394 3395 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3396 struct spi_message *msg, 3397 struct spi_transfer **xferp, 3398 size_t maxsize, 3399 gfp_t gfp) 3400 { 3401 struct spi_transfer *xfer = *xferp, *xfers; 3402 struct spi_replaced_transfers *srt; 3403 size_t offset; 3404 size_t count, i; 3405 3406 /* Calculate how many we have to replace */ 3407 count = DIV_ROUND_UP(xfer->len, maxsize); 3408 3409 /* Create replacement */ 3410 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3411 if (IS_ERR(srt)) 3412 return PTR_ERR(srt); 3413 xfers = srt->inserted_transfers; 3414 3415 /* 3416 * Now handle each of those newly inserted spi_transfers. 3417 * Note that the replacement spi_transfers are all preset 3418 * to the same values as *xferp, so tx_buf, rx_buf and len 3419 * are all identical (as well as most others) 3420 * so we just have to fix up len and the pointers. 3421 * 3422 * This also includes support for the deprecated 3423 * spi_message.is_dma_mapped interface. 3424 */ 3425 3426 /* 3427 * The first transfer just needs the length modified, so we 3428 * run it outside the loop. 3429 */ 3430 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3431 3432 /* All the others need rx_buf/tx_buf also set */ 3433 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3434 /* Update rx_buf, tx_buf and dma */ 3435 if (xfers[i].rx_buf) 3436 xfers[i].rx_buf += offset; 3437 if (xfers[i].rx_dma) 3438 xfers[i].rx_dma += offset; 3439 if (xfers[i].tx_buf) 3440 xfers[i].tx_buf += offset; 3441 if (xfers[i].tx_dma) 3442 xfers[i].tx_dma += offset; 3443 3444 /* Update length */ 3445 xfers[i].len = min(maxsize, xfers[i].len - offset); 3446 } 3447 3448 /* 3449 * We set up xferp to the last entry we have inserted, 3450 * so that we skip those already split transfers. 3451 */ 3452 *xferp = &xfers[count - 1]; 3453 3454 /* Increment statistics counters */ 3455 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, 3456 transfers_split_maxsize); 3457 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, 3458 transfers_split_maxsize); 3459 3460 return 0; 3461 } 3462 3463 /** 3464 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 3465 * when an individual transfer exceeds a 3466 * certain size 3467 * @ctlr: the @spi_controller for this transfer 3468 * @msg: the @spi_message to transform 3469 * @maxsize: the maximum size an individual transfer may have before it is split 3470 * @gfp: GFP allocation flags 3471 * 3472 * Return: status of transformation 3473 */ 3474 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3475 struct spi_message *msg, 3476 size_t maxsize, 3477 gfp_t gfp) 3478 { 3479 struct spi_transfer *xfer; 3480 int ret; 3481 3482 /* 3483 * Iterate over the transfer_list, 3484 * but note that xfer is advanced to the last transfer inserted 3485 * to avoid checking sizes again unnecessarily (also xfer does 3486 * potentially belong to a different list by the time the 3487 * replacement has happened).
3488 */ 3489 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3490 if (xfer->len > maxsize) { 3491 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3492 maxsize, gfp); 3493 if (ret) 3494 return ret; 3495 } 3496 } 3497 3498 return 0; 3499 } 3500 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3501 3502 /*-------------------------------------------------------------------------*/ 3503 3504 /* Core methods for SPI controller protocol drivers. Some of the 3505 * other core methods are currently defined as inline functions. 3506 */ 3507 3508 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3509 u8 bits_per_word) 3510 { 3511 if (ctlr->bits_per_word_mask) { 3512 /* Only 32 bits fit in the mask */ 3513 if (bits_per_word > 32) 3514 return -EINVAL; 3515 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3516 return -EINVAL; 3517 } 3518 3519 return 0; 3520 } 3521 3522 /** 3523 * spi_setup - setup SPI mode and clock rate 3524 * @spi: the device whose settings are being modified 3525 * Context: can sleep, and no requests are queued to the device 3526 * 3527 * SPI protocol drivers may need to update the transfer mode if the 3528 * device doesn't work with its default. They may likewise need 3529 * to update clock rates or word sizes from initial values. This function 3530 * changes those settings, and must be called from a context that can sleep. 3531 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3532 * effect the next time the device is selected and data is transferred to 3533 * or from it. When this function returns, the spi device is deselected. 3534 * 3535 * Note that this call will fail if the protocol driver specifies an option 3536 * that the underlying controller or its driver does not support. For 3537 * example, not all hardware supports wire transfers using nine bit words, 3538 * LSB-first wire encoding, or active-high chipselects. 3539 * 3540 * Return: zero on success, else a negative error code. 3541 */ 3542 int spi_setup(struct spi_device *spi) 3543 { 3544 unsigned bad_bits, ugly_bits; 3545 int status = 0; 3546 3547 /* 3548 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO 3549 * are set at the same time. 3550 */ 3551 if ((hweight_long(spi->mode & 3552 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3553 (hweight_long(spi->mode & 3554 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3555 dev_err(&spi->dev, 3556 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); 3557 return -EINVAL; 3558 } 3559 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ 3560 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3561 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3562 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3563 return -EINVAL; 3564 /* 3565 * Help drivers fail *cleanly* when they need options 3566 * that aren't supported with their current controller. 3567 * SPI_CS_WORD has a fallback software implementation, 3568 * so it is ignored here. 
3569 */ 3570 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3571 SPI_NO_TX | SPI_NO_RX); 3572 ugly_bits = bad_bits & 3573 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3574 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3575 if (ugly_bits) { 3576 dev_warn(&spi->dev, 3577 "setup: ignoring unsupported mode bits %x\n", 3578 ugly_bits); 3579 spi->mode &= ~ugly_bits; 3580 bad_bits &= ~ugly_bits; 3581 } 3582 if (bad_bits) { 3583 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3584 bad_bits); 3585 return -EINVAL; 3586 } 3587 3588 if (!spi->bits_per_word) { 3589 spi->bits_per_word = 8; 3590 } else { 3591 /* 3592 * Some controllers may not support the default 8 bits-per-word 3593 * so only perform the check when this is explicitly provided. 3594 */ 3595 status = __spi_validate_bits_per_word(spi->controller, 3596 spi->bits_per_word); 3597 if (status) 3598 return status; 3599 } 3600 3601 if (spi->controller->max_speed_hz && 3602 (!spi->max_speed_hz || 3603 spi->max_speed_hz > spi->controller->max_speed_hz)) 3604 spi->max_speed_hz = spi->controller->max_speed_hz; 3605 3606 mutex_lock(&spi->controller->io_mutex); 3607 3608 if (spi->controller->setup) { 3609 status = spi->controller->setup(spi); 3610 if (status) { 3611 mutex_unlock(&spi->controller->io_mutex); 3612 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3613 status); 3614 return status; 3615 } 3616 } 3617 3618 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3619 status = pm_runtime_resume_and_get(spi->controller->dev.parent); 3620 if (status < 0) { 3621 mutex_unlock(&spi->controller->io_mutex); 3622 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3623 status); 3624 return status; 3625 } 3626 3627 /* 3628 * We do not want to return positive value from pm_runtime_get, 3629 * there are many instances of devices calling spi_setup() and 3630 * checking for a non-zero return value instead of a negative 3631 * return value. 3632 */ 3633 status = 0; 3634 3635 spi_set_cs(spi, false, true); 3636 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3637 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3638 } else { 3639 spi_set_cs(spi, false, true); 3640 } 3641 3642 mutex_unlock(&spi->controller->io_mutex); 3643 3644 if (spi->rt && !spi->controller->rt) { 3645 spi->controller->rt = true; 3646 spi_set_thread_rt(spi->controller); 3647 } 3648 3649 trace_spi_setup(spi, status); 3650 3651 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3652 spi->mode & SPI_MODE_X_MASK, 3653 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3654 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3655 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3656 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3657 spi->bits_per_word, spi->max_speed_hz, 3658 status); 3659 3660 return status; 3661 } 3662 EXPORT_SYMBOL_GPL(spi_setup); 3663 3664 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3665 struct spi_device *spi) 3666 { 3667 int delay1, delay2; 3668 3669 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3670 if (delay1 < 0) 3671 return delay1; 3672 3673 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3674 if (delay2 < 0) 3675 return delay2; 3676 3677 if (delay1 < delay2) 3678 memcpy(&xfer->word_delay, &spi->word_delay, 3679 sizeof(xfer->word_delay)); 3680 3681 return 0; 3682 } 3683 3684 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3685 { 3686 struct spi_controller *ctlr = spi->controller; 3687 struct spi_transfer *xfer; 3688 int w_size; 3689 3690 if (list_empty(&message->transfers)) 3691 return -EINVAL; 3692 3693 /* 3694 * If an SPI controller does not support toggling the CS line on each 3695 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3696 * for the CS line, we can emulate the CS-per-word hardware function by 3697 * splitting transfers into one-word transfers and ensuring that 3698 * cs_change is set for each transfer. 3699 */ 3700 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3701 spi->cs_gpiod)) { 3702 size_t maxsize; 3703 int ret; 3704 3705 maxsize = (spi->bits_per_word + 7) / 8; 3706 3707 /* spi_split_transfers_maxsize() requires message->spi */ 3708 message->spi = spi; 3709 3710 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3711 GFP_KERNEL); 3712 if (ret) 3713 return ret; 3714 3715 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3716 /* Don't change cs_change on the last entry in the list */ 3717 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3718 break; 3719 xfer->cs_change = 1; 3720 } 3721 } 3722 3723 /* 3724 * Half-duplex links include original MicroWire, and ones with 3725 * only one data pin like SPI_3WIRE (switches direction) or where 3726 * either MOSI or MISO is missing. They can also be caused by 3727 * software limitations. 3728 */ 3729 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3730 (spi->mode & SPI_3WIRE)) { 3731 unsigned flags = ctlr->flags; 3732 3733 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3734 if (xfer->rx_buf && xfer->tx_buf) 3735 return -EINVAL; 3736 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3737 return -EINVAL; 3738 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3739 return -EINVAL; 3740 } 3741 } 3742 3743 /* 3744 * Set transfer bits_per_word and max speed as spi device default if 3745 * it is not set for this transfer. 3746 * Set transfer tx_nbits and rx_nbits as single transfer default 3747 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3748 * Ensure transfer word_delay is at least as long as that required by 3749 * device itself. 
3750 */ 3751 message->frame_length = 0; 3752 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3753 xfer->effective_speed_hz = 0; 3754 message->frame_length += xfer->len; 3755 if (!xfer->bits_per_word) 3756 xfer->bits_per_word = spi->bits_per_word; 3757 3758 if (!xfer->speed_hz) 3759 xfer->speed_hz = spi->max_speed_hz; 3760 3761 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3762 xfer->speed_hz = ctlr->max_speed_hz; 3763 3764 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3765 return -EINVAL; 3766 3767 /* 3768 * SPI transfer length should be multiple of SPI word size 3769 * where SPI word size should be power-of-two multiple. 3770 */ 3771 if (xfer->bits_per_word <= 8) 3772 w_size = 1; 3773 else if (xfer->bits_per_word <= 16) 3774 w_size = 2; 3775 else 3776 w_size = 4; 3777 3778 /* No partial transfers accepted */ 3779 if (xfer->len % w_size) 3780 return -EINVAL; 3781 3782 if (xfer->speed_hz && ctlr->min_speed_hz && 3783 xfer->speed_hz < ctlr->min_speed_hz) 3784 return -EINVAL; 3785 3786 if (xfer->tx_buf && !xfer->tx_nbits) 3787 xfer->tx_nbits = SPI_NBITS_SINGLE; 3788 if (xfer->rx_buf && !xfer->rx_nbits) 3789 xfer->rx_nbits = SPI_NBITS_SINGLE; 3790 /* 3791 * Check transfer tx/rx_nbits: 3792 * 1. check the value matches one of single, dual and quad 3793 * 2. check tx/rx_nbits match the mode in spi_device 3794 */ 3795 if (xfer->tx_buf) { 3796 if (spi->mode & SPI_NO_TX) 3797 return -EINVAL; 3798 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3799 xfer->tx_nbits != SPI_NBITS_DUAL && 3800 xfer->tx_nbits != SPI_NBITS_QUAD) 3801 return -EINVAL; 3802 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3803 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3804 return -EINVAL; 3805 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3806 !(spi->mode & SPI_TX_QUAD)) 3807 return -EINVAL; 3808 } 3809 /* Check transfer rx_nbits */ 3810 if (xfer->rx_buf) { 3811 if (spi->mode & SPI_NO_RX) 3812 return -EINVAL; 3813 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3814 xfer->rx_nbits != SPI_NBITS_DUAL && 3815 xfer->rx_nbits != SPI_NBITS_QUAD) 3816 return -EINVAL; 3817 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3818 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3819 return -EINVAL; 3820 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3821 !(spi->mode & SPI_RX_QUAD)) 3822 return -EINVAL; 3823 } 3824 3825 if (_spi_xfer_word_delay_update(xfer, spi)) 3826 return -EINVAL; 3827 } 3828 3829 message->status = -EINPROGRESS; 3830 3831 return 0; 3832 } 3833 3834 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3835 { 3836 struct spi_controller *ctlr = spi->controller; 3837 struct spi_transfer *xfer; 3838 3839 /* 3840 * Some controllers do not support doing regular SPI transfers. Return 3841 * ENOTSUPP when this is the case. 

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
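
/*
 * Illustrative sketch, not part of the core: a driver submitting a message
 * with spi_async() from a context that cannot sleep and finishing the work
 * in the completion callback. Every name prefixed "example_" and the command
 * byte are hypothetical; the spi_message/spi_transfer usage is real.
 */
struct example_async_ctx {
	struct spi_message	msg;
	struct spi_transfer	xfer;
	u8			cmd[2];
};

static void example_async_complete(void *context)
{
	struct example_async_ctx *ctx = context;

	/* Runs in a context that cannot sleep; msg.status is now final */
	if (ctx->msg.status)
		pr_warn("example: async transfer failed: %d\n", ctx->msg.status);
	kfree(ctx);
}

static int __maybe_unused example_submit_async(struct spi_device *spi)
{
	struct example_async_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);	/* may be called from atomic context */
	if (!ctx)
		return -ENOMEM;

	ctx->cmd[0] = 0xab;		/* hypothetical command byte */
	ctx->xfer.tx_buf = ctx->cmd;
	ctx->xfer.len = sizeof(ctx->cmd);

	spi_message_init_with_transfers(&ctx->msg, &ctx->xfer, 1);
	ctx->msg.complete = example_async_complete;
	ctx->msg.context = ctx;

	ret = spi_async(spi, &ctx->msg);
	if (ret)
		kfree(ctx);	/* message was never queued */
	return ret;
}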

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		goto out;

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

out:
	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until it
	 * has completed.
	 */
	message->complete = spi_complete;
	message->context = &done;
	status = spi_async_locked(spi, message);
	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
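
/*
 * Illustrative sketch, not part of the core: a typical spi_sync() caller
 * building a two-transfer message (command out, response in). The register
 * layout and "read" bit are hypothetical; for brevity the buffers live on
 * the stack, whereas production drivers should use DMA-safe (heap or
 * per-device) buffers.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;		/* hypothetical "read" bit */
	struct spi_transfer xfers[] = {
		{
			.tx_buf	= &cmd,
			.len	= 1,
		}, {
			.rx_buf	= val,
			.len	= 1,
		},
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));

	/*
	 * Sleeps until the message completes; chip select stays asserted
	 * across both transfers unless cs_change is set on the first one.
	 */
	return spi_sync(spi, &msg);
}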

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
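
/*
 * Illustrative sketch, not part of the core: the intended pairing of
 * spi_bus_lock()/spi_bus_unlock() with spi_sync_locked() when a driver
 * needs several messages to reach its device with no other bus traffic in
 * between. The two messages are assumed to have been built by the caller.
 */
static int __maybe_unused example_atomic_sequence(struct spi_device *spi,
						  struct spi_message *first,
						  struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);	/* spi_async() now fails with -EBUSY, spi_sync() waits */

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(ctlr);	/* normal bus traffic resumes */

	return ret;
}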

/* Portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it's a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
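
/*
 * Illustrative sketch, not part of the core: spi_write_then_read() used for
 * a small register read where convenience matters more than throughput. The
 * opcode and device are hypothetical; both buffers may live on the stack
 * because the helper copies through its own DMA-safe bounce buffer.
 */
static int __maybe_unused example_read_id(struct spi_device *spi, u8 *id)
{
	u8 cmd = 0x9f;	/* hypothetical "read ID" opcode */

	/* Write one command byte, then read one byte back */
	return spi_write_then_read(spi, &cmd, 1, id, 1);
}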

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The SPI controllers are not using spi_bus, so we must find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);