// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

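/*
 * For reference, SPI_STATISTICS_SHOW(messages) above expands (roughly) to:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics __percpu *stat,
 *						    char *buf)
 *	{
 *		return spi_emit_pcpu_stats(stat, buf,
 *					   offsetof(struct spi_statistics, messages));
 *	}
 *
 * plus the two read-only attributes dev_attr_spi_controller_messages and
 * dev_attr_spi_device_messages, both backed by a sysfs file named
 * "messages" (mode 0444) in the "statistics" groups declared below.
 */
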
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

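/*
 * Worked example of the bucketing above: a 100-byte transfer has
 * fls(100) = 7, so l2len = min(7, SPI_STATISTICS_HISTO_SIZE) - 1 = 6 and
 * the transfer lands in transfer_bytes_histo[6], i.e. the
 * "transfer_bytes_histo_64-127" file. A zero-length transfer would yield
 * l2len = -1, which the clamp above folds into bucket 0 ("0-1").
 */
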
/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n");
		if (spi->irq < 0)
			spi->irq = 0;
	}

	if (has_acpi_companion(dev) && spi->irq < 0) {
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);

		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

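/*
 * Example of a driver that satisfies the check above (an illustrative
 * sketch; the "acme" names are hypothetical, not from a real driver).
 * Note how the id_table entry "sense" matches the compatible string with
 * its vendor prefix stripped, which is exactly what the loop above tests:
 *
 *	static const struct spi_device_id acme_spi_ids[] = {
 *		{ .name = "sense" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_spi_ids);
 *
 *	static const struct of_device_id acme_of_match[] = {
 *		{ .compatible = "acme,sense" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, acme_of_match);
 *
 *	static struct spi_driver acme_driver = {
 *		.driver = {
 *			.name = "acme-sense",
 *			.of_match_table = acme_of_match,
 *		},
 *		.id_table = acme_spi_ids,
 *		.probe = acme_probe,
 *		.remove = acme_remove,
 *	};
 *	module_spi_driver(acme_driver);
 */
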
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to protect
 * the struct idr object tracking controller bus numbers.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero(0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * are initialized to 0 then it would be difficult to differentiate
 * between a valid physical CS 0 & an unused logical CS whose physical
 * CS can be 0. As a solution to this issue initialize all the CS to -1.
 * Now all the unused logical CS will have -1 physical CS value & can be
 * ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

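/*
 * Illustration of the convention above: a peripheral wired to physical
 * chip select 3 that uses a single logical CS ends up with (values are
 * illustrative):
 *
 *	spi->chip_select[0] == 3
 *	spi->chip_select[1..SPI_CS_CNT_MAX - 1] == SPI_INVALID_CS
 *	spi->cs_index_mask == BIT(0)
 *
 * so only index 0 passes is_valid_cs() and is visited by the
 * spi_for_each_valid_cs() iterator defined further down.
 */
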
static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS doesn't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	if (!spi_controller_is_target(ctlr)) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
			if (status)
				return status;
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

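/*
 * Example of the allocate-then-add pattern this pair of functions
 * supports (an illustrative sketch with hypothetical values, not taken
 * from a real driver; spi_new_device() below does the same thing and
 * additionally marks all unused logical CS slots invalid):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	int ret;
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "acme-sense", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 0);
 *	spi->max_speed_hz = 10000000;
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);	* discard without adding *
 *	return ret;
 */
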
static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *)chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

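/*
 * Example of typical board-file usage of spi_register_board_info() (an
 * illustrative sketch; the names and values are hypothetical):
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "acme-sense",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 5000000,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init board_init_spi(void)
 *	{
 *		return spi_register_board_info(board_spi_devs,
 *					       ARRAY_SIZE(board_spi_devs));
 *	}
 *	arch_initcall(board_init_spi);
 */
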
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of expressing the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered to be Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
	 * into account.
	 */
	if (has_acpi_companion(&spi->dev))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			spi_is_last_cs(spi)) ||
		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			!spi_is_last_cs(spi))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

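/*
 * A note on the polarity plumbing in spi_set_cs() above: "enable" is the
 * logical request (true = select the device) and "activate" preserves that
 * request for the setup/hold/inactive delays after SPI_CS_HIGH may have
 * inverted "enable". Electrically the chip select line ends up as:
 *
 *	request		mode		line level
 *	select		default		low
 *	select		SPI_CS_HIGH	high
 *	deselect	default		high
 *	deselect	SPI_CS_HIGH	low
 *
 * For non-ACPI GPIO descriptors that inversion lives in the gpiod
 * ACTIVE_LOW flag, which is why gpiod_set_value_cansleep() is handed the
 * uninverted "activate" value in that branch.
 */
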
#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

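/*
 * Worked example of the segmentation in spi_map_buf_attrs(): mapping a
 * 10 KiB vmalloc'ed buffer that starts 256 bytes into a page, with 4 KiB
 * pages and desc_len = PAGE_SIZE, allocates
 *
 *	sgs = DIV_ROUND_UP(10240 + 256, 4096) = 3
 *
 * scatterlist entries of 3840, 4096 and 2304 bytes: vmalloc memory is
 * only virtually contiguous, so every page needs its own entry.
 */
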
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
	    && !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

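/*
 * Illustration of the dummy-buffer substitution above: on a controller
 * flagged SPI_CONTROLLER_MUST_RX, a transmit-only transfer (tx_buf set,
 * rx_buf == NULL, len == 32) leaves spi_map_msg() with rx_buf pointing at
 * a ctlr->dummy_rx scratch buffer of at least 32 bytes; spi_unmap_msg()
 * later restores the NULL, so the caller never sees the substitution.
 */
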
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use the respective multiplier, but apply it before the
		 * division, otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

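/*
 * Worked example of the timeout above: a 1024-byte transfer at 1 MHz gives
 *
 *	ms = 8 * 1000 * 1024 / 1000000 = 8 ms (integer division),
 *	ms = 2 * 8 + 200 = 216 ms
 *
 * of tolerance before the transfer is declared timed out.
 */
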
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

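/*
 * Example for SPI_DELAY_UNIT_SCK above: a delay of 4 clock cycles on a
 * transfer whose effective_speed_hz is 10000000 converts to
 * 4 * DIV_ROUND_UP(1000000000, 10000000) = 4 * 100 = 400 ns.
 */
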
int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm, errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats, errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

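/*
 * Example of the cs_change handling in spi_transfer_one_message(): in a
 * message with transfers A and B where A->cs_change is set, chip select
 * is dropped after A, the cs_change delay runs, and CS is reasserted
 * before B. If instead the *last* transfer has cs_change set, keep_cs
 * stays true and the chip select is left asserted when the message
 * completes, e.g. so a following message can continue the same selection.
 */
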
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

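/*
 * Example: a typical interrupt-driven controller driver (hypothetical
 * "acme" names, an illustrative sketch) starts the transfer in its
 * transfer_one() and returns a positive value so the core sleeps in
 * spi_transfer_wait(); its IRQ handler then fires this completion:
 *
 *	static irqreturn_t acme_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		acme_spi_drain_fifo(ctlr);
 *		if (acme_spi_xfer_done(ctlr))
 *			spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */
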
static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags allow this function to opportunistically
	 * skip the completion, since taking it involves expensive spin
	 * locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the I/O mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

of the TX timestamp 1953 * @ctlr: Pointer to the spi_controller structure of the driver 1954 * @xfer: Pointer to the transfer being timestamped 1955 * @progress: How many words (not bytes) have been transferred so far 1956 * @irqs_off: If true, will disable IRQs and preemption for the duration of the 1957 * transfer, for less jitter in time measurement. Only compatible 1958 * with PIO drivers. If true, must follow up with 1959 * spi_take_timestamp_post or otherwise system will crash. 1960 * WARNING: for fully predictable results, the CPU frequency must 1961 * also be under control (governor). 1962 * 1963 * This is a helper for drivers to collect the beginning of the TX timestamp 1964 * for the requested byte from the SPI transfer. The frequency with which this 1965 * function must be called (once per word, once for the whole transfer, once 1966 * per batch of words etc) is arbitrary as long as the @tx buffer offset is 1967 * greater than or equal to the requested byte at the time of the call. The 1968 * timestamp is only taken once, at the first such call. It is assumed that 1969 * the driver advances its @tx buffer pointer monotonically. 1970 */ 1971 void spi_take_timestamp_pre(struct spi_controller *ctlr, 1972 struct spi_transfer *xfer, 1973 size_t progress, bool irqs_off) 1974 { 1975 if (!xfer->ptp_sts) 1976 return; 1977 1978 if (xfer->timestamped) 1979 return; 1980 1981 if (progress > xfer->ptp_sts_word_pre) 1982 return; 1983 1984 /* Capture the resolution of the timestamp */ 1985 xfer->ptp_sts_word_pre = progress; 1986 1987 if (irqs_off) { 1988 local_irq_save(ctlr->irq_flags); 1989 preempt_disable(); 1990 } 1991 1992 ptp_read_system_prets(xfer->ptp_sts); 1993 } 1994 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); 1995 1996 /** 1997 * spi_take_timestamp_post - helper to collect the end of the TX timestamp 1998 * @ctlr: Pointer to the spi_controller structure of the driver 1999 * @xfer: Pointer to the transfer being timestamped 2000 * @progress: How many words (not bytes) have been transferred so far 2001 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. 2002 * 2003 * This is a helper for drivers to collect the end of the TX timestamp for 2004 * the requested byte from the SPI transfer. Can be called with an arbitrary 2005 * frequency: only the first call where @tx exceeds or is equal to the 2006 * requested word will be timestamped. 2007 */ 2008 void spi_take_timestamp_post(struct spi_controller *ctlr, 2009 struct spi_transfer *xfer, 2010 size_t progress, bool irqs_off) 2011 { 2012 if (!xfer->ptp_sts) 2013 return; 2014 2015 if (xfer->timestamped) 2016 return; 2017 2018 if (progress < xfer->ptp_sts_word_post) 2019 return; 2020 2021 ptp_read_system_postts(xfer->ptp_sts); 2022 2023 if (irqs_off) { 2024 local_irq_restore(ctlr->irq_flags); 2025 preempt_enable(); 2026 } 2027 2028 /* Capture the resolution of the timestamp */ 2029 xfer->ptp_sts_word_post = progress; 2030 2031 xfer->timestamped = 1; 2032 } 2033 EXPORT_SYMBOL_GPL(spi_take_timestamp_post); 2034 2035 /** 2036 * spi_set_thread_rt - set the controller to pump at realtime priority 2037 * @ctlr: controller to boost priority of 2038 * 2039 * This can be called because the controller requested realtime priority 2040 * (by setting the ->rt value before calling spi_register_controller()) or 2041 * because a device on the bus said that its transfers needed realtime 2042 * priority. 
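 *
 * A peripheral driver can request this by setting spi->rt before calling
 * spi_setup(), as handled at the end of spi_setup() below, e.g.:
 *
 *	spi->rt = true;
 *	status = spi_setup(spi);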
2043 * 2044 * NOTE: at the moment if any device on a bus says it needs realtime then 2045 * the thread will be at realtime priority for all transfers on that 2046 * controller. If this eventually becomes a problem we may see if we can 2047 * find a way to boost the priority only temporarily during relevant 2048 * transfers. 2049 */ 2050 static void spi_set_thread_rt(struct spi_controller *ctlr) 2051 { 2052 dev_info(&ctlr->dev, 2053 "will run message pump with realtime priority\n"); 2054 sched_set_fifo(ctlr->kworker->task); 2055 } 2056 2057 static int spi_init_queue(struct spi_controller *ctlr) 2058 { 2059 ctlr->running = false; 2060 ctlr->busy = false; 2061 ctlr->queue_empty = true; 2062 2063 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); 2064 if (IS_ERR(ctlr->kworker)) { 2065 dev_err(&ctlr->dev, "failed to create message pump kworker\n"); 2066 return PTR_ERR(ctlr->kworker); 2067 } 2068 2069 kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 2070 2071 /* 2072 * Controller config will indicate if this controller should run the 2073 * message pump with high (realtime) priority to reduce the transfer 2074 * latency on the bus by minimising the delay between a transfer 2075 * request and the scheduling of the message pump thread. Without this 2076 * setting the message pump thread will remain at default priority. 2077 */ 2078 if (ctlr->rt) 2079 spi_set_thread_rt(ctlr); 2080 2081 return 0; 2082 } 2083 2084 /** 2085 * spi_get_next_queued_message() - called by driver to check for queued 2086 * messages 2087 * @ctlr: the controller to check for queued messages 2088 * 2089 * If there are more messages in the queue, the next message is returned from 2090 * this call. 2091 * 2092 * Return: the next message in the queue, else NULL if the queue is empty. 2093 */ 2094 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) 2095 { 2096 struct spi_message *next; 2097 unsigned long flags; 2098 2099 /* Get a pointer to the next message, if any */ 2100 spin_lock_irqsave(&ctlr->queue_lock, flags); 2101 next = list_first_entry_or_null(&ctlr->queue, struct spi_message, 2102 queue); 2103 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2104 2105 return next; 2106 } 2107 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 2108 2109 /* 2110 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message() 2111 * and spi_maybe_unoptimize_message() 2112 * @msg: the message to unoptimize 2113 * 2114 * Peripheral drivers should use spi_unoptimize_message() and callers inside 2115 * core should use spi_maybe_unoptimize_message() rather than calling this 2116 * function directly. 2117 * 2118 * It is not valid to call this on a message that is not currently optimized. 2119 */ 2120 static void __spi_unoptimize_message(struct spi_message *msg) 2121 { 2122 struct spi_controller *ctlr = msg->spi->controller; 2123 2124 if (ctlr->unoptimize_message) 2125 ctlr->unoptimize_message(msg); 2126 2127 spi_res_release(ctlr, msg); 2128 2129 msg->optimized = false; 2130 msg->opt_state = NULL; 2131 } 2132 2133 /* 2134 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral 2135 * @msg: the message to unoptimize 2136 * 2137 * This function is used to unoptimize a message if and only if it was 2138 * optimized by the core (via spi_maybe_optimize_message()). 
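 *
 * Messages that a peripheral driver pre-optimized with spi_optimize_message()
 * are left untouched here; such a driver is expected to undo the optimization
 * itself, e.g.:
 *
 *	ret = spi_optimize_message(spi, msg);
 *	... submit msg, possibly many times ...
 *	spi_unoptimize_message(msg);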
2139 */ 2140 static void spi_maybe_unoptimize_message(struct spi_message *msg) 2141 { 2142 if (!msg->pre_optimized && msg->optimized && 2143 !msg->spi->controller->defer_optimize_message) 2144 __spi_unoptimize_message(msg); 2145 } 2146 2147 /** 2148 * spi_finalize_current_message() - the current message is complete 2149 * @ctlr: the controller to return the message to 2150 * 2151 * Called by the driver to notify the core that the message in the front of the 2152 * queue is complete and can be removed from the queue. 2153 */ 2154 void spi_finalize_current_message(struct spi_controller *ctlr) 2155 { 2156 struct spi_transfer *xfer; 2157 struct spi_message *mesg; 2158 int ret; 2159 2160 mesg = ctlr->cur_msg; 2161 2162 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 2163 list_for_each_entry(xfer, &mesg->transfers, transfer_list) { 2164 ptp_read_system_postts(xfer->ptp_sts); 2165 xfer->ptp_sts_word_post = xfer->len; 2166 } 2167 } 2168 2169 if (unlikely(ctlr->ptp_sts_supported)) 2170 list_for_each_entry(xfer, &mesg->transfers, transfer_list) 2171 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); 2172 2173 spi_unmap_msg(ctlr, mesg); 2174 2175 if (mesg->prepared && ctlr->unprepare_message) { 2176 ret = ctlr->unprepare_message(ctlr, mesg); 2177 if (ret) { 2178 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 2179 ret); 2180 } 2181 } 2182 2183 mesg->prepared = false; 2184 2185 spi_maybe_unoptimize_message(mesg); 2186 2187 WRITE_ONCE(ctlr->cur_msg_incomplete, false); 2188 smp_mb(); /* See __spi_pump_transfer_message()... */ 2189 if (READ_ONCE(ctlr->cur_msg_need_completion)) 2190 complete(&ctlr->cur_msg_completion); 2191 2192 trace_spi_message_done(mesg); 2193 2194 mesg->state = NULL; 2195 if (mesg->complete) 2196 mesg->complete(mesg->context); 2197 } 2198 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 2199 2200 static int spi_start_queue(struct spi_controller *ctlr) 2201 { 2202 unsigned long flags; 2203 2204 spin_lock_irqsave(&ctlr->queue_lock, flags); 2205 2206 if (ctlr->running || ctlr->busy) { 2207 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2208 return -EBUSY; 2209 } 2210 2211 ctlr->running = true; 2212 ctlr->cur_msg = NULL; 2213 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2214 2215 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2216 2217 return 0; 2218 } 2219 2220 static int spi_stop_queue(struct spi_controller *ctlr) 2221 { 2222 unsigned int limit = 500; 2223 unsigned long flags; 2224 2225 /* 2226 * This is a bit lame, but is optimized for the common execution path. 2227 * A wait_queue on the ctlr->busy could be used, but then the common 2228 * execution path (pump_messages) would be required to call wake_up or 2229 * friends on every SPI message. Do this instead. 2230 */ 2231 do { 2232 spin_lock_irqsave(&ctlr->queue_lock, flags); 2233 if (list_empty(&ctlr->queue) && !ctlr->busy) { 2234 ctlr->running = false; 2235 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2236 return 0; 2237 } 2238 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2239 usleep_range(10000, 11000); 2240 } while (--limit); 2241 2242 return -EBUSY; 2243 } 2244 2245 static int spi_destroy_queue(struct spi_controller *ctlr) 2246 { 2247 int ret; 2248 2249 ret = spi_stop_queue(ctlr); 2250 2251 /* 2252 * kthread_flush_worker will block until all work is done. 2253 * If the reason that stop_queue timed out is that the work will never 2254 * finish, then it does no good to call flush/stop thread, so 2255 * return anyway. 
 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	ctlr->queue_empty = false;
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static void of_spi_parse_dt_cs_delay(struct device_node *nc,
				     struct spi_delay *delay, const char *prop)
{
	u32 value;

	if (!of_property_read_u32(nc, prop, &value)) {
		if (value > U16_MAX) {
			delay->value = DIV_ROUND_UP(value, 1000);
			delay->unit = SPI_DELAY_UNIT_USECS;
		} else {
			delay->value = value;
			delay->unit = SPI_DELAY_UNIT_NSECS;
		}
	}
}

static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value, cs[SPI_CS_CNT_MAX];
	int rc, idx;

	/* Mode (clock phase/polarity/etc.)
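	 * e.g. a child node with both "spi-cpol" and "spi-cpha" present ends
	 * up with spi->mode containing SPI_CPOL | SPI_CPHA (SPI_MODE_3)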
*/ 2375 if (of_property_read_bool(nc, "spi-cpha")) 2376 spi->mode |= SPI_CPHA; 2377 if (of_property_read_bool(nc, "spi-cpol")) 2378 spi->mode |= SPI_CPOL; 2379 if (of_property_read_bool(nc, "spi-3wire")) 2380 spi->mode |= SPI_3WIRE; 2381 if (of_property_read_bool(nc, "spi-lsb-first")) 2382 spi->mode |= SPI_LSB_FIRST; 2383 if (of_property_read_bool(nc, "spi-cs-high")) 2384 spi->mode |= SPI_CS_HIGH; 2385 2386 /* Device DUAL/QUAD mode */ 2387 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2388 switch (value) { 2389 case 0: 2390 spi->mode |= SPI_NO_TX; 2391 break; 2392 case 1: 2393 break; 2394 case 2: 2395 spi->mode |= SPI_TX_DUAL; 2396 break; 2397 case 4: 2398 spi->mode |= SPI_TX_QUAD; 2399 break; 2400 case 8: 2401 spi->mode |= SPI_TX_OCTAL; 2402 break; 2403 default: 2404 dev_warn(&ctlr->dev, 2405 "spi-tx-bus-width %d not supported\n", 2406 value); 2407 break; 2408 } 2409 } 2410 2411 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2412 switch (value) { 2413 case 0: 2414 spi->mode |= SPI_NO_RX; 2415 break; 2416 case 1: 2417 break; 2418 case 2: 2419 spi->mode |= SPI_RX_DUAL; 2420 break; 2421 case 4: 2422 spi->mode |= SPI_RX_QUAD; 2423 break; 2424 case 8: 2425 spi->mode |= SPI_RX_OCTAL; 2426 break; 2427 default: 2428 dev_warn(&ctlr->dev, 2429 "spi-rx-bus-width %d not supported\n", 2430 value); 2431 break; 2432 } 2433 } 2434 2435 if (spi_controller_is_target(ctlr)) { 2436 if (!of_node_name_eq(nc, "slave")) { 2437 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2438 nc); 2439 return -EINVAL; 2440 } 2441 return 0; 2442 } 2443 2444 if (ctlr->num_chipselect > SPI_CS_CNT_MAX) { 2445 dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n"); 2446 return -EINVAL; 2447 } 2448 2449 spi_set_all_cs_unused(spi); 2450 2451 /* Device address */ 2452 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1, 2453 SPI_CS_CNT_MAX); 2454 if (rc < 0) { 2455 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2456 nc, rc); 2457 return rc; 2458 } 2459 if (rc > ctlr->num_chipselect) { 2460 dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", 2461 nc, rc); 2462 return rc; 2463 } 2464 if ((of_property_present(nc, "parallel-memories")) && 2465 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { 2466 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n"); 2467 return -EINVAL; 2468 } 2469 for (idx = 0; idx < rc; idx++) 2470 spi_set_chipselect(spi, idx, cs[idx]); 2471 2472 /* 2473 * By default spi->chip_select[0] will hold the physical CS number, 2474 * so set bit 0 in spi->cs_index_mask. 
2475 */ 2476 spi->cs_index_mask = BIT(0); 2477 2478 /* Device speed */ 2479 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2480 spi->max_speed_hz = value; 2481 2482 /* Device CS delays */ 2483 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); 2484 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); 2485 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); 2486 2487 return 0; 2488 } 2489 2490 static struct spi_device * 2491 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2492 { 2493 struct spi_device *spi; 2494 int rc; 2495 2496 /* Alloc an spi_device */ 2497 spi = spi_alloc_device(ctlr); 2498 if (!spi) { 2499 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2500 rc = -ENOMEM; 2501 goto err_out; 2502 } 2503 2504 /* Select device driver */ 2505 rc = of_alias_from_compatible(nc, spi->modalias, 2506 sizeof(spi->modalias)); 2507 if (rc < 0) { 2508 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2509 goto err_out; 2510 } 2511 2512 rc = of_spi_parse_dt(ctlr, spi, nc); 2513 if (rc) 2514 goto err_out; 2515 2516 /* Store a pointer to the node in the device structure */ 2517 of_node_get(nc); 2518 2519 device_set_node(&spi->dev, of_fwnode_handle(nc)); 2520 2521 /* Register the new device */ 2522 rc = spi_add_device(spi); 2523 if (rc) { 2524 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2525 goto err_of_node_put; 2526 } 2527 2528 return spi; 2529 2530 err_of_node_put: 2531 of_node_put(nc); 2532 err_out: 2533 spi_dev_put(spi); 2534 return ERR_PTR(rc); 2535 } 2536 2537 /** 2538 * of_register_spi_devices() - Register child devices onto the SPI bus 2539 * @ctlr: Pointer to spi_controller device 2540 * 2541 * Registers an spi_device for each child node of controller node which 2542 * represents a valid SPI slave. 2543 */ 2544 static void of_register_spi_devices(struct spi_controller *ctlr) 2545 { 2546 struct spi_device *spi; 2547 struct device_node *nc; 2548 2549 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2550 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2551 continue; 2552 spi = of_register_spi_device(ctlr, nc); 2553 if (IS_ERR(spi)) { 2554 dev_warn(&ctlr->dev, 2555 "Failed to create SPI device for %pOF\n", nc); 2556 of_node_clear_flag(nc, OF_POPULATED); 2557 } 2558 } 2559 } 2560 #else 2561 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2562 #endif 2563 2564 /** 2565 * spi_new_ancillary_device() - Register ancillary SPI device 2566 * @spi: Pointer to the main SPI device registering the ancillary device 2567 * @chip_select: Chip Select of the ancillary device 2568 * 2569 * Register an ancillary SPI device; for example some chips have a chip-select 2570 * for normal device usage and another one for setup/firmware upload. 2571 * 2572 * This may only be called from main SPI device's probe routine. 
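 *
 * For example, a (hypothetical) probe routine for a chip whose second
 * chip select is used for firmware upload might do:
 *
 *	ancillary = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(ancillary))
 *		return PTR_ERR(ancillary);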
 *
 * Return: pointer to the new ancillary SPI device on success, or an
 * ERR_PTR() on failure.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_device *ancillary;
	int rc;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(ctlr);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	spi_set_all_cs_unused(ancillary);
	spi_set_chipselect(ancillary, 0, chip_select);

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	ancillary->cs_index_mask = BIT(0);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));

	/* Register the new device */
	rc = __spi_add_device(ancillary);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
	int			n;
	int			index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev: ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI-device's
 * resource-list; or a negative error code.
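 *
 * A caller enumerating every SpiSerialBus resource of @adev might combine
 * this with acpi_spi_device_alloc() (illustrative sketch):
 *
 *	int i, n = acpi_spi_count_resources(adev);
 *
 *	for (i = 0; i < n; i++) {
 *		spi = acpi_spi_device_alloc(NULL, adev, i);
 *		...
 *	}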
2657 */ 2658 int acpi_spi_count_resources(struct acpi_device *adev) 2659 { 2660 LIST_HEAD(r); 2661 int count = 0; 2662 int ret; 2663 2664 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); 2665 if (ret < 0) 2666 return ret; 2667 2668 acpi_dev_free_resource_list(&r); 2669 2670 return count; 2671 } 2672 EXPORT_SYMBOL_GPL(acpi_spi_count_resources); 2673 2674 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2675 struct acpi_spi_lookup *lookup) 2676 { 2677 const union acpi_object *obj; 2678 2679 if (!x86_apple_machine) 2680 return; 2681 2682 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2683 && obj->buffer.length >= 4) 2684 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2685 2686 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2687 && obj->buffer.length == 8) 2688 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2689 2690 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2691 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2692 lookup->mode |= SPI_LSB_FIRST; 2693 2694 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2695 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2696 lookup->mode |= SPI_CPOL; 2697 2698 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2699 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2700 lookup->mode |= SPI_CPHA; 2701 } 2702 2703 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2704 { 2705 struct acpi_spi_lookup *lookup = data; 2706 struct spi_controller *ctlr = lookup->ctlr; 2707 2708 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2709 struct acpi_resource_spi_serialbus *sb; 2710 acpi_handle parent_handle; 2711 acpi_status status; 2712 2713 sb = &ares->data.spi_serial_bus; 2714 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2715 2716 if (lookup->index != -1 && lookup->n++ != lookup->index) 2717 return 1; 2718 2719 status = acpi_get_handle(NULL, 2720 sb->resource_source.string_ptr, 2721 &parent_handle); 2722 2723 if (ACPI_FAILURE(status)) 2724 return -ENODEV; 2725 2726 if (ctlr) { 2727 if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle)) 2728 return -ENODEV; 2729 } else { 2730 struct acpi_device *adev; 2731 2732 adev = acpi_fetch_acpi_dev(parent_handle); 2733 if (!adev) 2734 return -ENODEV; 2735 2736 ctlr = acpi_spi_find_controller_by_adev(adev); 2737 if (!ctlr) 2738 return -EPROBE_DEFER; 2739 2740 lookup->ctlr = ctlr; 2741 } 2742 2743 /* 2744 * ACPI DeviceSelection numbering is handled by the 2745 * host controller driver in Windows and can vary 2746 * from driver to driver. In Linux we always expect 2747 * 0 .. max - 1 so we need to ask the driver to 2748 * translate between the two schemes. 
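			 * A controller whose firmware numbers chip selects
			 * starting at 1, for instance, would implement
			 * ->fw_translate_cs() to subtract 1 here.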
 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

/**
 * acpi_spi_device_alloc - Allocate an SPI device and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new SPI device from an ACPI device node.
 * The caller is responsible for calling spi_add_device() to register the SPI
 * device.
 *
 * If ctlr is set to NULL, the controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
					 struct acpi_device *adev,
					 int index)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	if (!ctlr && index == -1)
		return ERR_PTR(-EINVAL);

	lookup.ctlr		= ctlr;
	lookup.irq		= -1;
	lookup.index		= index;
	lookup.n		= 0;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* Found SPI in _CRS but it points to another controller */
		return ERR_PTR(ret);

	if (!lookup.max_speed_hz &&
	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
		/* Apple does not use _CRS but nested devices for SPI slaves */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	if (!lookup.max_speed_hz)
		return ERR_PTR(-ENODEV);

	spi = spi_alloc_device(lookup.ctlr);
	if (!spi) {
		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return ERR_PTR(-ENOMEM);
	}

	spi_set_all_cs_unused(spi);
	spi_set_chipselect(spi, 0, lookup.chip_select);

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz	= lookup.max_speed_hz;
	spi->mode		|= lookup.mode;
	spi->irq		= lookup.irq;
	spi->bits_per_word	= lookup.bits_per_word;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
2852 */ 2853 spi->cs_index_mask = BIT(0); 2854 2855 return spi; 2856 } 2857 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); 2858 2859 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2860 struct acpi_device *adev) 2861 { 2862 struct spi_device *spi; 2863 2864 if (acpi_bus_get_status(adev) || !adev->status.present || 2865 acpi_device_enumerated(adev)) 2866 return AE_OK; 2867 2868 spi = acpi_spi_device_alloc(ctlr, adev, -1); 2869 if (IS_ERR(spi)) { 2870 if (PTR_ERR(spi) == -ENOMEM) 2871 return AE_NO_MEMORY; 2872 else 2873 return AE_OK; 2874 } 2875 2876 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2877 sizeof(spi->modalias)); 2878 2879 acpi_device_set_enumerated(adev); 2880 2881 adev->power.flags.ignore_parent = true; 2882 if (spi_add_device(spi)) { 2883 adev->power.flags.ignore_parent = false; 2884 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2885 dev_name(&adev->dev)); 2886 spi_dev_put(spi); 2887 } 2888 2889 return AE_OK; 2890 } 2891 2892 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2893 void *data, void **return_value) 2894 { 2895 struct acpi_device *adev = acpi_fetch_acpi_dev(handle); 2896 struct spi_controller *ctlr = data; 2897 2898 if (!adev) 2899 return AE_OK; 2900 2901 return acpi_register_spi_device(ctlr, adev); 2902 } 2903 2904 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2905 2906 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2907 { 2908 acpi_status status; 2909 acpi_handle handle; 2910 2911 handle = ACPI_HANDLE(ctlr->dev.parent); 2912 if (!handle) 2913 return; 2914 2915 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2916 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2917 acpi_spi_add_device, NULL, ctlr, NULL); 2918 if (ACPI_FAILURE(status)) 2919 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2920 } 2921 #else 2922 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2923 #endif /* CONFIG_ACPI */ 2924 2925 static void spi_controller_release(struct device *dev) 2926 { 2927 struct spi_controller *ctlr; 2928 2929 ctlr = container_of(dev, struct spi_controller, dev); 2930 kfree(ctlr); 2931 } 2932 2933 static const struct class spi_master_class = { 2934 .name = "spi_master", 2935 .dev_release = spi_controller_release, 2936 .dev_groups = spi_master_groups, 2937 }; 2938 2939 #ifdef CONFIG_SPI_SLAVE 2940 /** 2941 * spi_target_abort - abort the ongoing transfer request on an SPI slave 2942 * controller 2943 * @spi: device used for the current transfer 2944 */ 2945 int spi_target_abort(struct spi_device *spi) 2946 { 2947 struct spi_controller *ctlr = spi->controller; 2948 2949 if (spi_controller_is_target(ctlr) && ctlr->target_abort) 2950 return ctlr->target_abort(ctlr); 2951 2952 return -ENOTSUPP; 2953 } 2954 EXPORT_SYMBOL_GPL(spi_target_abort); 2955 2956 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2957 char *buf) 2958 { 2959 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2960 dev); 2961 struct device *child; 2962 2963 child = device_find_any_child(&ctlr->dev); 2964 return sysfs_emit(buf, "%s\n", child ? 
to_spi_device(child)->modalias : NULL); 2965 } 2966 2967 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2968 const char *buf, size_t count) 2969 { 2970 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2971 dev); 2972 struct spi_device *spi; 2973 struct device *child; 2974 char name[32]; 2975 int rc; 2976 2977 rc = sscanf(buf, "%31s", name); 2978 if (rc != 1 || !name[0]) 2979 return -EINVAL; 2980 2981 child = device_find_any_child(&ctlr->dev); 2982 if (child) { 2983 /* Remove registered slave */ 2984 device_unregister(child); 2985 put_device(child); 2986 } 2987 2988 if (strcmp(name, "(null)")) { 2989 /* Register new slave */ 2990 spi = spi_alloc_device(ctlr); 2991 if (!spi) 2992 return -ENOMEM; 2993 2994 strscpy(spi->modalias, name, sizeof(spi->modalias)); 2995 2996 rc = spi_add_device(spi); 2997 if (rc) { 2998 spi_dev_put(spi); 2999 return rc; 3000 } 3001 } 3002 3003 return count; 3004 } 3005 3006 static DEVICE_ATTR_RW(slave); 3007 3008 static struct attribute *spi_slave_attrs[] = { 3009 &dev_attr_slave.attr, 3010 NULL, 3011 }; 3012 3013 static const struct attribute_group spi_slave_group = { 3014 .attrs = spi_slave_attrs, 3015 }; 3016 3017 static const struct attribute_group *spi_slave_groups[] = { 3018 &spi_controller_statistics_group, 3019 &spi_slave_group, 3020 NULL, 3021 }; 3022 3023 static const struct class spi_slave_class = { 3024 .name = "spi_slave", 3025 .dev_release = spi_controller_release, 3026 .dev_groups = spi_slave_groups, 3027 }; 3028 #else 3029 extern struct class spi_slave_class; /* dummy */ 3030 #endif 3031 3032 /** 3033 * __spi_alloc_controller - allocate an SPI master or slave controller 3034 * @dev: the controller, possibly using the platform_bus 3035 * @size: how much zeroed driver-private data to allocate; the pointer to this 3036 * memory is in the driver_data field of the returned device, accessible 3037 * with spi_controller_get_devdata(); the memory is cacheline aligned; 3038 * drivers granting DMA access to portions of their private data need to 3039 * round up @size using ALIGN(size, dma_get_cache_alignment()). 3040 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 3041 * slave (true) controller 3042 * Context: can sleep 3043 * 3044 * This call is used only by SPI controller drivers, which are the 3045 * only ones directly touching chip registers. It's how they allocate 3046 * an spi_controller structure, prior to calling spi_register_controller(). 3047 * 3048 * This must be called from context that can sleep. 3049 * 3050 * The caller is responsible for assigning the bus number and initializing the 3051 * controller's methods before calling spi_register_controller(); and (after 3052 * errors adding the device) calling spi_controller_put() to prevent a memory 3053 * leak. 3054 * 3055 * Return: the SPI controller structure on success, else NULL. 
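 *
 * Typical use from a controller driver's probe routine, via the
 * spi_alloc_host() wrapper (sketch; the foo_* names are illustrative):
 *
 *	ctlr = spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);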
3056 */ 3057 struct spi_controller *__spi_alloc_controller(struct device *dev, 3058 unsigned int size, bool slave) 3059 { 3060 struct spi_controller *ctlr; 3061 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 3062 3063 if (!dev) 3064 return NULL; 3065 3066 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 3067 if (!ctlr) 3068 return NULL; 3069 3070 device_initialize(&ctlr->dev); 3071 INIT_LIST_HEAD(&ctlr->queue); 3072 spin_lock_init(&ctlr->queue_lock); 3073 spin_lock_init(&ctlr->bus_lock_spinlock); 3074 mutex_init(&ctlr->bus_lock_mutex); 3075 mutex_init(&ctlr->io_mutex); 3076 mutex_init(&ctlr->add_lock); 3077 ctlr->bus_num = -1; 3078 ctlr->num_chipselect = 1; 3079 ctlr->slave = slave; 3080 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 3081 ctlr->dev.class = &spi_slave_class; 3082 else 3083 ctlr->dev.class = &spi_master_class; 3084 ctlr->dev.parent = dev; 3085 pm_suspend_ignore_children(&ctlr->dev, true); 3086 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 3087 3088 return ctlr; 3089 } 3090 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 3091 3092 static void devm_spi_release_controller(struct device *dev, void *ctlr) 3093 { 3094 spi_controller_put(*(struct spi_controller **)ctlr); 3095 } 3096 3097 /** 3098 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 3099 * @dev: physical device of SPI controller 3100 * @size: how much zeroed driver-private data to allocate 3101 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 3102 * Context: can sleep 3103 * 3104 * Allocate an SPI controller and automatically release a reference on it 3105 * when @dev is unbound from its driver. Drivers are thus relieved from 3106 * having to call spi_controller_put(). 3107 * 3108 * The arguments to this function are identical to __spi_alloc_controller(). 3109 * 3110 * Return: the SPI controller structure on success, else NULL. 3111 */ 3112 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 3113 unsigned int size, 3114 bool slave) 3115 { 3116 struct spi_controller **ptr, *ctlr; 3117 3118 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 3119 GFP_KERNEL); 3120 if (!ptr) 3121 return NULL; 3122 3123 ctlr = __spi_alloc_controller(dev, size, slave); 3124 if (ctlr) { 3125 ctlr->devm_allocated = true; 3126 *ptr = ctlr; 3127 devres_add(dev, ptr); 3128 } else { 3129 devres_free(ptr); 3130 } 3131 3132 return ctlr; 3133 } 3134 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 3135 3136 /** 3137 * spi_get_gpio_descs() - grab chip select GPIOs for the master 3138 * @ctlr: The SPI master to grab GPIO descriptors for 3139 */ 3140 static int spi_get_gpio_descs(struct spi_controller *ctlr) 3141 { 3142 int nb, i; 3143 struct gpio_desc **cs; 3144 struct device *dev = &ctlr->dev; 3145 unsigned long native_cs_mask = 0; 3146 unsigned int num_cs_gpios = 0; 3147 3148 nb = gpiod_count(dev, "cs"); 3149 if (nb < 0) { 3150 /* No GPIOs at all is fine, else return the error */ 3151 if (nb == -ENOENT) 3152 return 0; 3153 return nb; 3154 } 3155 3156 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 3157 3158 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 3159 GFP_KERNEL); 3160 if (!cs) 3161 return -ENOMEM; 3162 ctlr->cs_gpiods = cs; 3163 3164 for (i = 0; i < nb; i++) { 3165 /* 3166 * Most chipselects are active low, the inverted 3167 * semantics are handled by special quirks in gpiolib, 3168 * so initializing them GPIOD_OUT_LOW here means 3169 * "unasserted", in most cases this will drive the physical 3170 * line high. 
 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}

static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this
	 * is a valid use case.
	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
	 * one of the ->transfer_xxx() methods be implemented.
	 */
	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (!ctlr->transfer && !ctlr->transfer_one &&
		    !ctlr->transfer_one_message) {
			return -EINVAL;
		}
	}

	return 0;
}

/* Allocate dynamic bus number using Linux idr */
static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
{
	int id;

	mutex_lock(&board_lock);
	id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
	mutex_unlock(&board_lock);
	if (WARN(id < 0, "couldn't get idr"))
		return id == -ENOSPC ? -EBUSY : id;
	ctlr->bus_num = id;
	return 0;
}

/**
 * spi_register_controller - register SPI host or target controller
 * @ctlr: initialized controller, originally from spi_alloc_host() or
 *	spi_alloc_target()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus. The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board-specific (often SoC-specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers. Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep. It returns zero on
 * success, else a negative error code (dropping the controller's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device		*dev = ctlr->dev.parent;
	struct boardinfo	*bi;
	int			first_dynamic;
	int			status;
	int			idx;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
3281 */ 3282 status = spi_controller_check_ops(ctlr); 3283 if (status) 3284 return status; 3285 3286 if (ctlr->bus_num < 0) 3287 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi"); 3288 if (ctlr->bus_num >= 0) { 3289 /* Devices with a fixed bus num must check-in with the num */ 3290 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1); 3291 if (status) 3292 return status; 3293 } 3294 if (ctlr->bus_num < 0) { 3295 first_dynamic = of_alias_get_highest_id("spi"); 3296 if (first_dynamic < 0) 3297 first_dynamic = 0; 3298 else 3299 first_dynamic++; 3300 3301 status = spi_controller_id_alloc(ctlr, first_dynamic, 0); 3302 if (status) 3303 return status; 3304 } 3305 ctlr->bus_lock_flag = 0; 3306 init_completion(&ctlr->xfer_completion); 3307 init_completion(&ctlr->cur_msg_completion); 3308 if (!ctlr->max_dma_len) 3309 ctlr->max_dma_len = INT_MAX; 3310 3311 /* 3312 * Register the device, then userspace will see it. 3313 * Registration fails if the bus ID is in use. 3314 */ 3315 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 3316 3317 if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) { 3318 status = spi_get_gpio_descs(ctlr); 3319 if (status) 3320 goto free_bus_id; 3321 /* 3322 * A controller using GPIO descriptors always 3323 * supports SPI_CS_HIGH if need be. 3324 */ 3325 ctlr->mode_bits |= SPI_CS_HIGH; 3326 } 3327 3328 /* 3329 * Even if it's just one always-selected device, there must 3330 * be at least one chipselect. 3331 */ 3332 if (!ctlr->num_chipselect) { 3333 status = -EINVAL; 3334 goto free_bus_id; 3335 } 3336 3337 /* Setting last_cs to SPI_INVALID_CS means no chip selected */ 3338 for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) 3339 ctlr->last_cs[idx] = SPI_INVALID_CS; 3340 3341 status = device_add(&ctlr->dev); 3342 if (status < 0) 3343 goto free_bus_id; 3344 dev_dbg(dev, "registered %s %s\n", 3345 spi_controller_is_target(ctlr) ? "target" : "host", 3346 dev_name(&ctlr->dev)); 3347 3348 /* 3349 * If we're using a queued driver, start the queue. Note that we don't 3350 * need the queueing logic if the driver is only supporting high-level 3351 * memory operations. 
 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status) {
			device_del(&ctlr->dev);
			goto free_bus_id;
		}
	}
	/* Add statistics */
	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
	if (!ctlr->pcpu_statistics) {
		dev_err(dev, "Error allocating per-cpu statistics\n");
		status = -ENOMEM;
		goto destroy_queue;
	}

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

destroy_queue:
	spi_destroy_queue(ctlr);
free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_master_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_controller(*(struct spi_controller **)res);
}

/**
 * devm_spi_register_controller - register managed SPI host or target
 *	controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_host() or
 *	spi_alloc_target()
 * Context: can sleep
 *
 * Register an SPI controller as with spi_register_controller(); the
 * controller is automatically unregistered when @dev is unbound from
 * its driver.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	struct spi_controller **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_controller(ctlr);
	if (!ret) {
		*ptr = ctlr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
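 *
 * Typically called from a driver's remove callback when the controller was
 * not registered with devm_spi_register_controller() (sketch; foo_remove
 * is an illustrative name):
 *
 *	static void foo_remove(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr = platform_get_drvdata(pdev);
 *
 *		spi_unregister_controller(ctlr);
 *	}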
3448 */ 3449 void spi_unregister_controller(struct spi_controller *ctlr) 3450 { 3451 struct spi_controller *found; 3452 int id = ctlr->bus_num; 3453 3454 /* Prevent addition of new devices, unregister existing ones */ 3455 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3456 mutex_lock(&ctlr->add_lock); 3457 3458 device_for_each_child(&ctlr->dev, NULL, __unregister); 3459 3460 /* First make sure that this controller was ever added */ 3461 mutex_lock(&board_lock); 3462 found = idr_find(&spi_master_idr, id); 3463 mutex_unlock(&board_lock); 3464 if (ctlr->queued) { 3465 if (spi_destroy_queue(ctlr)) 3466 dev_err(&ctlr->dev, "queue remove failed\n"); 3467 } 3468 mutex_lock(&board_lock); 3469 list_del(&ctlr->list); 3470 mutex_unlock(&board_lock); 3471 3472 device_del(&ctlr->dev); 3473 3474 /* Free bus id */ 3475 mutex_lock(&board_lock); 3476 if (found == ctlr) 3477 idr_remove(&spi_master_idr, id); 3478 mutex_unlock(&board_lock); 3479 3480 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3481 mutex_unlock(&ctlr->add_lock); 3482 3483 /* 3484 * Release the last reference on the controller if its driver 3485 * has not yet been converted to devm_spi_alloc_host/target(). 3486 */ 3487 if (!ctlr->devm_allocated) 3488 put_device(&ctlr->dev); 3489 } 3490 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3491 3492 static inline int __spi_check_suspended(const struct spi_controller *ctlr) 3493 { 3494 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0; 3495 } 3496 3497 static inline void __spi_mark_suspended(struct spi_controller *ctlr) 3498 { 3499 mutex_lock(&ctlr->bus_lock_mutex); 3500 ctlr->flags |= SPI_CONTROLLER_SUSPENDED; 3501 mutex_unlock(&ctlr->bus_lock_mutex); 3502 } 3503 3504 static inline void __spi_mark_resumed(struct spi_controller *ctlr) 3505 { 3506 mutex_lock(&ctlr->bus_lock_mutex); 3507 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED; 3508 mutex_unlock(&ctlr->bus_lock_mutex); 3509 } 3510 3511 int spi_controller_suspend(struct spi_controller *ctlr) 3512 { 3513 int ret = 0; 3514 3515 /* Basically no-ops for non-queued controllers */ 3516 if (ctlr->queued) { 3517 ret = spi_stop_queue(ctlr); 3518 if (ret) 3519 dev_err(&ctlr->dev, "queue stop failed\n"); 3520 } 3521 3522 __spi_mark_suspended(ctlr); 3523 return ret; 3524 } 3525 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3526 3527 int spi_controller_resume(struct spi_controller *ctlr) 3528 { 3529 int ret = 0; 3530 3531 __spi_mark_resumed(ctlr); 3532 3533 if (ctlr->queued) { 3534 ret = spi_start_queue(ctlr); 3535 if (ret) 3536 dev_err(&ctlr->dev, "queue restart failed\n"); 3537 } 3538 return ret; 3539 } 3540 EXPORT_SYMBOL_GPL(spi_controller_resume); 3541 3542 /*-------------------------------------------------------------------------*/ 3543 3544 /* Core methods for spi_message alterations */ 3545 3546 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3547 struct spi_message *msg, 3548 void *res) 3549 { 3550 struct spi_replaced_transfers *rxfer = res; 3551 size_t i; 3552 3553 /* Call extra callback if requested */ 3554 if (rxfer->release) 3555 rxfer->release(ctlr, msg, res); 3556 3557 /* Insert replaced transfers back into the message */ 3558 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3559 3560 /* Remove the formerly inserted entries */ 3561 for (i = 0; i < rxfer->inserted; i++) 3562 list_del(&rxfer->inserted_transfers[i].transfer_list); 3563 } 3564 3565 /** 3566 * spi_replace_transfers - replace transfers with several transfers 3567 * and register change with spi_message.resources 3568 * @msg: the spi_message we work upon 3569 * 
@xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *		   of struct spi_transfer)
 * @gfp: gfp flags
 *
 * Return: pointer to the new struct spi_replaced_transfers,
 * or an ERR_PTR() in case of errors.
 */
static struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* Allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* The release code to invoke before running the generic release */
	rxfer->release = release;

	/* Assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* Init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* Remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers,
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* Insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* Free the spi_replace_transfer structure... */
			spi_res_free(rxfer);

			/* ...and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copies of the given xfer with identical settings,
	 * based on the first transfer to get removed.
	 */
	for (i = 0; i < insert; i++) {
		/* We need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* Copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* Add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* Clear cs_change and delay for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay.value = 0;
		}
	}

	/* Set up inserted...
 */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and DMA */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum length, in bytes, that an individual transfer
 *	     may have
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
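	 *
	 * e.g. with maxsize = 4, a single 10-byte transfer is replaced by
	 * three transfers of 4, 4 and 2 bytes.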
3764 */ 3765 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3766 if (xfer->len > maxsize) { 3767 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3768 maxsize); 3769 if (ret) 3770 return ret; 3771 } 3772 } 3773 3774 return 0; 3775 } 3776 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3777 3778 3779 /** 3780 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers 3781 * when an individual transfer exceeds a 3782 * certain number of SPI words 3783 * @ctlr: the @spi_controller for this transfer 3784 * @msg: the @spi_message to transform 3785 * @maxwords: the number of words to limit each transfer to 3786 * 3787 * This function allocates resources that are automatically freed during the 3788 * spi message unoptimize phase so this function should only be called from 3789 * optimize_message callbacks. 3790 * 3791 * Return: status of transformation 3792 */ 3793 int spi_split_transfers_maxwords(struct spi_controller *ctlr, 3794 struct spi_message *msg, 3795 size_t maxwords) 3796 { 3797 struct spi_transfer *xfer; 3798 3799 /* 3800 * Iterate over the transfer_list, 3801 * but note that xfer is advanced to the last transfer inserted 3802 * to avoid checking sizes again unnecessarily (also xfer does 3803 * potentially belong to a different list by the time the 3804 * replacement has happened). 3805 */ 3806 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3807 size_t maxsize; 3808 int ret; 3809 3810 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word)); 3811 if (xfer->len > maxsize) { 3812 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3813 maxsize); 3814 if (ret) 3815 return ret; 3816 } 3817 } 3818 3819 return 0; 3820 } 3821 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords); 3822 3823 /*-------------------------------------------------------------------------*/ 3824 3825 /* 3826 * Core methods for SPI controller protocol drivers. Some of the 3827 * other core methods are currently defined as inline functions. 3828 */ 3829 3830 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3831 u8 bits_per_word) 3832 { 3833 if (ctlr->bits_per_word_mask) { 3834 /* Only 32 bits fit in the mask */ 3835 if (bits_per_word > 32) 3836 return -EINVAL; 3837 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3838 return -EINVAL; 3839 } 3840 3841 return 0; 3842 } 3843 3844 /** 3845 * spi_set_cs_timing - configure CS setup, hold, and inactive delays 3846 * @spi: the device that requires specific CS timing configuration 3847 * 3848 * Return: zero on success, else a negative error code. 
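 *
 * The delays themselves come from the device, e.g. parsed from the
 * "spi-cs-setup-delay-ns" family of DT properties above, or set by a
 * peripheral driver before calling spi_setup() (sketch):
 *
 *	spi->cs_setup.value = 10;
 *	spi->cs_setup.unit = SPI_DELAY_UNIT_USECS;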
3849  */
3850 static int spi_set_cs_timing(struct spi_device *spi)
3851 {
3852 	struct device *parent = spi->controller->dev.parent;
3853 	int status = 0;
3854 
3855 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3856 		if (spi->controller->auto_runtime_pm) {
3857 			status = pm_runtime_get_sync(parent);
3858 			if (status < 0) {
3859 				pm_runtime_put_noidle(parent);
3860 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3861 					status);
3862 				return status;
3863 			}
3864 
3865 			status = spi->controller->set_cs_timing(spi);
3866 			pm_runtime_mark_last_busy(parent);
3867 			pm_runtime_put_autosuspend(parent);
3868 		} else {
3869 			status = spi->controller->set_cs_timing(spi);
3870 		}
3871 	}
3872 	return status;
3873 }
3874 
3875 /**
3876  * spi_setup - setup SPI mode and clock rate
3877  * @spi: the device whose settings are being modified
3878  * Context: can sleep, and no requests are queued to the device
3879  *
3880  * SPI protocol drivers may need to update the transfer mode if the
3881  * device doesn't work with its default. They may likewise need
3882  * to update clock rates or word sizes from initial values. This function
3883  * changes those settings, and must be called from a context that can sleep.
3884  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3885  * effect the next time the device is selected and data is transferred to
3886  * or from it. When this function returns, the SPI device is deselected.
3887  *
3888  * Note that this call will fail if the protocol driver specifies an option
3889  * that the underlying controller or its driver does not support. For
3890  * example, not all hardware supports wire transfers using nine bit words,
3891  * LSB-first wire encoding, or active-high chipselects.
3892  *
3893  * Return: zero on success, else a negative error code.
3894  */
3895 int spi_setup(struct spi_device *spi)
3896 {
3897 	unsigned bad_bits, ugly_bits;
3898 	int status;
3899 
3900 	/*
3901 	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3902 	 * from being set at the same time.
3903 	 */
3904 	if ((hweight_long(spi->mode &
3905 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3906 	    (hweight_long(spi->mode &
3907 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3908 		dev_err(&spi->dev,
3909 			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3910 		return -EINVAL;
3911 	}
3912 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3913 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3914 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3915 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3916 		return -EINVAL;
3917 	/* Check against conflicting MOSI idle configuration */
3918 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3919 		dev_err(&spi->dev,
3920 			"setup: MOSI configured to idle low and high at the same time.\n");
3921 		return -EINVAL;
3922 	}
3923 	/*
3924 	 * Help drivers fail *cleanly* when they need options
3925 	 * that aren't supported with their current controller.
3926 	 * SPI_CS_WORD has a fallback software implementation,
3927 	 * so it is ignored here.
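	 *
	 * For example (illustrative): an unsupported SPI_TX_QUAD request is
	 * dropped with a warning below (ugly_bits), while an unsupported
	 * SPI_CS_HIGH request fails with -EINVAL (bad_bits).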
3928 	 */
3929 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3930 				 SPI_NO_TX | SPI_NO_RX);
3931 	ugly_bits = bad_bits &
3932 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3933 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3934 	if (ugly_bits) {
3935 		dev_warn(&spi->dev,
3936 			 "setup: ignoring unsupported mode bits %x\n",
3937 			 ugly_bits);
3938 		spi->mode &= ~ugly_bits;
3939 		bad_bits &= ~ugly_bits;
3940 	}
3941 	if (bad_bits) {
3942 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3943 			bad_bits);
3944 		return -EINVAL;
3945 	}
3946 
3947 	if (!spi->bits_per_word) {
3948 		spi->bits_per_word = 8;
3949 	} else {
3950 		/*
3951 		 * Some controllers may not support the default 8 bits-per-word,
3952 		 * so only perform the check when a value is explicitly provided.
3953 		 */
3954 		status = __spi_validate_bits_per_word(spi->controller,
3955 						      spi->bits_per_word);
3956 		if (status)
3957 			return status;
3958 	}
3959 
3960 	if (spi->controller->max_speed_hz &&
3961 	    (!spi->max_speed_hz ||
3962 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3963 		spi->max_speed_hz = spi->controller->max_speed_hz;
3964 
3965 	mutex_lock(&spi->controller->io_mutex);
3966 
3967 	if (spi->controller->setup) {
3968 		status = spi->controller->setup(spi);
3969 		if (status) {
3970 			mutex_unlock(&spi->controller->io_mutex);
3971 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3972 				status);
3973 			return status;
3974 		}
3975 	}
3976 
3977 	status = spi_set_cs_timing(spi);
3978 	if (status) {
3979 		mutex_unlock(&spi->controller->io_mutex);
3980 		return status;
3981 	}
3982 
3983 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3984 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3985 		if (status < 0) {
3986 			mutex_unlock(&spi->controller->io_mutex);
3987 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3988 				status);
3989 			return status;
3990 		}
3991 
3992 		/*
3993 		 * We do not want to return a positive value from pm_runtime_get;
3994 		 * there are many instances of drivers calling spi_setup() and
3995 		 * checking for a non-zero return value instead of a negative
3996 		 * return value.
3997 		 */
3998 		status = 0;
3999 
4000 		spi_set_cs(spi, false, true);
4001 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
4002 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4003 	} else {
4004 		spi_set_cs(spi, false, true);
4005 	}
4006 
4007 	mutex_unlock(&spi->controller->io_mutex);
4008 
4009 	if (spi->rt && !spi->controller->rt) {
4010 		spi->controller->rt = true;
4011 		spi_set_thread_rt(spi->controller);
4012 	}
4013 
4014 	trace_spi_setup(spi, status);
4015 
4016 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4017 		spi->mode & SPI_MODE_X_MASK,
4018 		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4019 		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4020 		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4021 		(spi->mode & SPI_LOOP) ?
"loopback, " : "", 4022 spi->bits_per_word, spi->max_speed_hz, 4023 status); 4024 4025 return status; 4026 } 4027 EXPORT_SYMBOL_GPL(spi_setup); 4028 4029 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 4030 struct spi_device *spi) 4031 { 4032 int delay1, delay2; 4033 4034 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 4035 if (delay1 < 0) 4036 return delay1; 4037 4038 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 4039 if (delay2 < 0) 4040 return delay2; 4041 4042 if (delay1 < delay2) 4043 memcpy(&xfer->word_delay, &spi->word_delay, 4044 sizeof(xfer->word_delay)); 4045 4046 return 0; 4047 } 4048 4049 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 4050 { 4051 struct spi_controller *ctlr = spi->controller; 4052 struct spi_transfer *xfer; 4053 int w_size; 4054 4055 if (list_empty(&message->transfers)) 4056 return -EINVAL; 4057 4058 message->spi = spi; 4059 4060 /* 4061 * Half-duplex links include original MicroWire, and ones with 4062 * only one data pin like SPI_3WIRE (switches direction) or where 4063 * either MOSI or MISO is missing. They can also be caused by 4064 * software limitations. 4065 */ 4066 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 4067 (spi->mode & SPI_3WIRE)) { 4068 unsigned flags = ctlr->flags; 4069 4070 list_for_each_entry(xfer, &message->transfers, transfer_list) { 4071 if (xfer->rx_buf && xfer->tx_buf) 4072 return -EINVAL; 4073 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 4074 return -EINVAL; 4075 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 4076 return -EINVAL; 4077 } 4078 } 4079 4080 /* 4081 * Set transfer bits_per_word and max speed as spi device default if 4082 * it is not set for this transfer. 4083 * Set transfer tx_nbits and rx_nbits as single transfer default 4084 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 4085 * Ensure transfer word_delay is at least as long as that required by 4086 * device itself. 4087 */ 4088 message->frame_length = 0; 4089 list_for_each_entry(xfer, &message->transfers, transfer_list) { 4090 xfer->effective_speed_hz = 0; 4091 message->frame_length += xfer->len; 4092 if (!xfer->bits_per_word) 4093 xfer->bits_per_word = spi->bits_per_word; 4094 4095 if (!xfer->speed_hz) 4096 xfer->speed_hz = spi->max_speed_hz; 4097 4098 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 4099 xfer->speed_hz = ctlr->max_speed_hz; 4100 4101 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 4102 return -EINVAL; 4103 4104 /* 4105 * SPI transfer length should be multiple of SPI word size 4106 * where SPI word size should be power-of-two multiple. 4107 */ 4108 if (xfer->bits_per_word <= 8) 4109 w_size = 1; 4110 else if (xfer->bits_per_word <= 16) 4111 w_size = 2; 4112 else 4113 w_size = 4; 4114 4115 /* No partial transfers accepted */ 4116 if (xfer->len % w_size) 4117 return -EINVAL; 4118 4119 if (xfer->speed_hz && ctlr->min_speed_hz && 4120 xfer->speed_hz < ctlr->min_speed_hz) 4121 return -EINVAL; 4122 4123 if (xfer->tx_buf && !xfer->tx_nbits) 4124 xfer->tx_nbits = SPI_NBITS_SINGLE; 4125 if (xfer->rx_buf && !xfer->rx_nbits) 4126 xfer->rx_nbits = SPI_NBITS_SINGLE; 4127 /* 4128 * Check transfer tx/rx_nbits: 4129 * 1. check the value matches one of single, dual and quad 4130 * 2. 
check tx/rx_nbits match the mode in spi_device
		 */
4132 		if (xfer->tx_buf) {
4133 			if (spi->mode & SPI_NO_TX)
4134 				return -EINVAL;
4135 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4136 			    xfer->tx_nbits != SPI_NBITS_DUAL &&
4137 			    xfer->tx_nbits != SPI_NBITS_QUAD &&
4138 			    xfer->tx_nbits != SPI_NBITS_OCTAL)
4139 				return -EINVAL;
4140 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4141 			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4142 				return -EINVAL;
4143 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4144 			    !(spi->mode & SPI_TX_QUAD))
4145 				return -EINVAL;
4146 		}
4147 		/* Check transfer rx_nbits */
4148 		if (xfer->rx_buf) {
4149 			if (spi->mode & SPI_NO_RX)
4150 				return -EINVAL;
4151 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4152 			    xfer->rx_nbits != SPI_NBITS_DUAL &&
4153 			    xfer->rx_nbits != SPI_NBITS_QUAD &&
4154 			    xfer->rx_nbits != SPI_NBITS_OCTAL)
4155 				return -EINVAL;
4156 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4157 			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4158 				return -EINVAL;
4159 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4160 			    !(spi->mode & SPI_RX_QUAD))
4161 				return -EINVAL;
4162 		}
4163 
4164 		if (_spi_xfer_word_delay_update(xfer, spi))
4165 			return -EINVAL;
4166 	}
4167 
4168 	message->status = -EINPROGRESS;
4169 
4170 	return 0;
4171 }
4172 
4173 /*
4174  * spi_split_transfers - generic handling of transfer splitting
4175  * @msg: the message to split
4176  *
4177  * Under certain conditions, a SPI controller may not support arbitrary
4178  * transfer sizes or other features required by a peripheral. This function
4179  * will split the transfers in the message into smaller transfers that are
4180  * supported by the controller.
4181  *
4182  * Controllers with special requirements not covered here can also split
4183  * transfers in the optimize_message() callback.
4184  *
4185  * Context: can sleep
4186  * Return: zero on success, else a negative error code
4187  */
4188 static int spi_split_transfers(struct spi_message *msg)
4189 {
4190 	struct spi_controller *ctlr = msg->spi->controller;
4191 	struct spi_transfer *xfer;
4192 	int ret;
4193 
4194 	/*
4195 	 * If an SPI controller does not support toggling the CS line on each
4196 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4197 	 * for the CS line, we can emulate the CS-per-word hardware function by
4198 	 * splitting transfers into one-word transfers and ensuring that
4199 	 * cs_change is set for each transfer.
4200 	 */
4201 	if ((msg->spi->mode & SPI_CS_WORD) &&
4202 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4203 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4204 		if (ret)
4205 			return ret;
4206 
4207 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4208 			/* Don't change cs_change on the last entry in the list */
4209 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4210 				break;
4211 
4212 			xfer->cs_change = 1;
4213 		}
4214 	} else {
4215 		ret = spi_split_transfers_maxsize(ctlr, msg,
4216 						  spi_max_transfer_size(msg->spi));
4217 		if (ret)
4218 			return ret;
4219 	}
4220 
4221 	return 0;
4222 }
4223 
4224 /*
4225  * __spi_optimize_message - shared implementation for spi_optimize_message()
4226  *                          and spi_maybe_optimize_message()
4227  * @spi: the device that will be used for the message
4228  * @msg: the message to optimize
4229  *
4230  * Peripheral drivers will call spi_optimize_message() and the spi core will
4231  * call spi_maybe_optimize_message() instead of calling this directly.
4232  *
4233  * It is not valid to call this on a message that has already been optimized.
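 *
 * For orientation (a summary of the code below): __spi_validate() runs
 * first, then spi_split_transfers(), then the controller's optional
 * optimize_message() callback; spi_res resources are released if that
 * callback fails.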
4234  *
4235  * Return: zero on success, else a negative error code
4236  */
4237 static int __spi_optimize_message(struct spi_device *spi,
4238 				  struct spi_message *msg)
4239 {
4240 	struct spi_controller *ctlr = spi->controller;
4241 	int ret;
4242 
4243 	ret = __spi_validate(spi, msg);
4244 	if (ret)
4245 		return ret;
4246 
4247 	ret = spi_split_transfers(msg);
4248 	if (ret)
4249 		return ret;
4250 
4251 	if (ctlr->optimize_message) {
4252 		ret = ctlr->optimize_message(msg);
4253 		if (ret) {
4254 			spi_res_release(ctlr, msg);
4255 			return ret;
4256 		}
4257 	}
4258 
4259 	msg->optimized = true;
4260 
4261 	return 0;
4262 }
4263 
4264 /*
4265  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4266  * @spi: the device that will be used for the message
4267  * @msg: the message to optimize
4268  * Return: zero on success, else a negative error code
4269  */
4270 static int spi_maybe_optimize_message(struct spi_device *spi,
4271 				      struct spi_message *msg)
4272 {
4273 	if (spi->controller->defer_optimize_message) {
4274 		msg->spi = spi;
4275 		return 0;
4276 	}
4277 
4278 	if (msg->pre_optimized)
4279 		return 0;
4280 
4281 	return __spi_optimize_message(spi, msg);
4282 }
4283 
4284 /**
4285  * spi_optimize_message - do any one-time validation and setup for a SPI message
4286  * @spi: the device that will be used for the message
4287  * @msg: the message to optimize
4288  *
4289  * Peripheral drivers that reuse the same message repeatedly may call this to
4290  * perform as much message prep as possible once, rather than repeating it each
4291  * time the message is transferred, improving throughput and reducing CPU
4292  * usage.
4293  *
4294  * Once a message has been optimized, it cannot be modified with the exception
4295  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4296  * only the data in the memory it points to).
4297  *
4298  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4299  * to avoid leaking resources.
4300  *
4301  * Context: can sleep
4302  * Return: zero on success, else a negative error code
4303  */
4304 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4305 {
4306 	int ret;
4307 
4308 	/*
4309 	 * Pre-optimization is not supported when optimization is deferred,
4310 	 * e.g. when using spi-mux.
4311 	 */
4312 	if (spi->controller->defer_optimize_message)
4313 		return 0;
4314 
4315 	ret = __spi_optimize_message(spi, msg);
4316 	if (ret)
4317 		return ret;
4318 
4319 	/*
4320 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4321 	 * and therefore we shouldn't unoptimize the message automatically when
4322 	 * finalizing the message, but rather wait until spi_unoptimize_message()
4323 	 * is called by the peripheral driver.
4324 	 */
4325 	msg->pre_optimized = true;
4326 
4327 	return 0;
4328 }
4329 EXPORT_SYMBOL_GPL(spi_optimize_message);
4330 
4331 /**
4332  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4333  * @msg: the message to unoptimize
4334  *
4335  * Calls to this function must be balanced with calls to spi_optimize_message().
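 *
 * Typical balanced usage (an illustrative sketch, not from the original
 * source):
 *
 *	ret = spi_optimize_message(spi, msg);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < repeats; i++)
 *		spi_sync(spi, msg);
 *	spi_unoptimize_message(msg);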
4336  *
4337  * Context: can sleep
4338  */
4339 void spi_unoptimize_message(struct spi_message *msg)
4340 {
4341 	if (msg->spi->controller->defer_optimize_message)
4342 		return;
4343 
4344 	__spi_unoptimize_message(msg);
4345 	msg->pre_optimized = false;
4346 }
4347 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
4348 
4349 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4350 {
4351 	struct spi_controller *ctlr = spi->controller;
4352 	struct spi_transfer *xfer;
4353 
4354 	/*
4355 	 * Some controllers do not support doing regular SPI transfers. Return
4356 	 * ENOTSUPP when this is the case.
4357 	 */
4358 	if (!ctlr->transfer)
4359 		return -ENOTSUPP;
4360 
4361 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4362 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4363 
4364 	trace_spi_message_submit(message);
4365 
4366 	if (!ctlr->ptp_sts_supported) {
4367 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4368 			xfer->ptp_sts_word_pre = 0;
4369 			ptp_read_system_prets(xfer->ptp_sts);
4370 		}
4371 	}
4372 
4373 	return ctlr->transfer(spi, message);
4374 }
4375 
4376 static void devm_spi_unoptimize_message(void *msg)
4377 {
4378 	spi_unoptimize_message(msg);
4379 }
4380 
4381 /**
4382  * devm_spi_optimize_message - managed version of spi_optimize_message()
4383  * @dev: the device that manages @msg (usually @spi->dev)
4384  * @spi: the device that will be used for the message
4385  * @msg: the message to optimize
4386  * Return: zero on success, else a negative error code
4387  *
4388  * spi_unoptimize_message() will automatically be called when the device is
4389  * removed.
4390  */
4391 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4392 			      struct spi_message *msg)
4393 {
4394 	int ret;
4395 
4396 	ret = spi_optimize_message(spi, msg);
4397 	if (ret)
4398 		return ret;
4399 
4400 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4401 }
4402 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
4403 
4404 /**
4405  * spi_async - asynchronous SPI transfer
4406  * @spi: device with which data will be exchanged
4407  * @message: describes the data transfers, including completion callback
4408  * Context: any (IRQs may be blocked, etc)
4409  *
4410  * This call may be used in IRQ handlers and other contexts which can't sleep,
4411  * as well as from task contexts which can sleep.
4412  *
4413  * The completion callback is invoked in a context which can't sleep.
4414  * Before that invocation, the value of message->status is undefined.
4415  * When the callback is issued, message->status holds either zero (to
4416  * indicate complete success) or a negative error code. After that
4417  * callback returns, the driver which issued the transfer request may
4418  * deallocate the associated memory; it's no longer in use by any SPI
4419  * core or controller driver code.
4420  *
4421  * Note that although all messages to a spi_device are handled in
4422  * FIFO order, messages may go to different devices in other orders.
4423  * Some devices might be higher priority, or have various "hard" access
4424  * time requirements, for example.
4425  *
4426  * On detection of any fault during the transfer, processing of
4427  * the entire message is aborted, and the device is deselected.
4428  * Until returning from the associated message completion callback,
4429  * no other spi_message queued to that device will be processed.
4430  * (This rule applies equally to all the synchronous transfer calls,
4431  * which are wrappers around this core asynchronous primitive.)
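 *
 * A minimal submission sketch (illustrative; my_done is a hypothetical
 * completion callback, and it must not sleep):
 *
 *	static void my_done(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	...
 *	message->complete = my_done;
 *	message->context = &done;
 *	ret = spi_async(spi, message);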
4432  *
4433  * Return: zero on success, else a negative error code.
4434  */
4435 int spi_async(struct spi_device *spi, struct spi_message *message)
4436 {
4437 	struct spi_controller *ctlr = spi->controller;
4438 	int ret;
4439 	unsigned long flags;
4440 
4441 	ret = spi_maybe_optimize_message(spi, message);
4442 	if (ret)
4443 		return ret;
4444 
4445 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4446 
4447 	if (ctlr->bus_lock_flag)
4448 		ret = -EBUSY;
4449 	else
4450 		ret = __spi_async(spi, message);
4451 
4452 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4453 
4454 	return ret;
4455 }
4456 EXPORT_SYMBOL_GPL(spi_async);
4457 
4458 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4459 {
4460 	bool was_busy;
4461 	int ret;
4462 
4463 	mutex_lock(&ctlr->io_mutex);
4464 
4465 	was_busy = ctlr->busy;
4466 
4467 	ctlr->cur_msg = msg;
4468 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4469 	if (ret)
4470 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4471 	ctlr->cur_msg = NULL;
4472 	ctlr->fallback = false;
4473 
4474 	if (!was_busy) {
4475 		kfree(ctlr->dummy_rx);
4476 		ctlr->dummy_rx = NULL;
4477 		kfree(ctlr->dummy_tx);
4478 		ctlr->dummy_tx = NULL;
4479 		if (ctlr->unprepare_transfer_hardware &&
4480 		    ctlr->unprepare_transfer_hardware(ctlr))
4481 			dev_err(&ctlr->dev,
4482 				"failed to unprepare transfer hardware\n");
4483 		spi_idle_runtime_pm(ctlr);
4484 	}
4485 
4486 	mutex_unlock(&ctlr->io_mutex);
4487 }
4488 
4489 /*-------------------------------------------------------------------------*/
4490 
4491 /*
4492  * Utility methods for SPI protocol drivers, layered on
4493  * top of the core. Some other utility methods are defined as
4494  * inline functions.
4495  */
4496 
4497 static void spi_complete(void *arg)
4498 {
4499 	complete(arg);
4500 }
4501 
4502 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4503 {
4504 	DECLARE_COMPLETION_ONSTACK(done);
4505 	unsigned long flags;
4506 	int status;
4507 	struct spi_controller *ctlr = spi->controller;
4508 
4509 	if (__spi_check_suspended(ctlr)) {
4510 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4511 		return -ESHUTDOWN;
4512 	}
4513 
4514 	status = spi_maybe_optimize_message(spi, message);
4515 	if (status)
4516 		return status;
4517 
4518 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4519 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4520 
4521 	/*
4522 	 * Checking queue_empty here only guarantees async/sync message
4523 	 * ordering when coming from the same context. It does not need to
4524 	 * guard against reentrancy from a different context. The io_mutex
4525 	 * will catch those cases.
4526 	 */
4527 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4528 		message->actual_length = 0;
4529 		message->status = -EINPROGRESS;
4530 
4531 		trace_spi_message_submit(message);
4532 
4533 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4534 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4535 
4536 		__spi_transfer_message_noqueue(ctlr, message);
4537 
4538 		return message->status;
4539 	}
4540 
4541 	/*
4542 	 * There are messages in the async queue that could have originated
4543 	 * from the same context, so we need to preserve ordering.
4544 	 * Therefore we send the message to the async queue and wait until it
4545 	 * has completed.
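	 *
	 * (The on-stack completion below is signalled via spi_complete()
	 * once the controller has finished processing the message.)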
4546 	 */
4547 	message->complete = spi_complete;
4548 	message->context = &done;
4549 
4550 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4551 	status = __spi_async(spi, message);
4552 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4553 
4554 	if (status == 0) {
4555 		wait_for_completion(&done);
4556 		status = message->status;
4557 	}
4558 	message->complete = NULL;
4559 	message->context = NULL;
4560 
4561 	return status;
4562 }
4563 
4564 /**
4565  * spi_sync - blocking/synchronous SPI data transfers
4566  * @spi: device with which data will be exchanged
4567  * @message: describes the data transfers
4568  * Context: can sleep
4569  *
4570  * This call may only be used from a context that may sleep. The sleep
4571  * is non-interruptible, and has no timeout. Low-overhead controller
4572  * drivers may DMA directly into and out of the message buffers.
4573  *
4574  * Note that the SPI device's chip select is active during the message,
4575  * and then is normally disabled between messages. Drivers for some
4576  * frequently-used devices may want to minimize costs of selecting a chip,
4577  * by leaving it selected in anticipation that the next message will go
4578  * to the same chip. (That may increase power usage.)
4579  *
4580  * Also, the caller is guaranteeing that the memory associated with the
4581  * message will not be freed before this call returns.
4582  *
4583  * Return: zero on success, else a negative error code.
4584  */
4585 int spi_sync(struct spi_device *spi, struct spi_message *message)
4586 {
4587 	int ret;
4588 
4589 	mutex_lock(&spi->controller->bus_lock_mutex);
4590 	ret = __spi_sync(spi, message);
4591 	mutex_unlock(&spi->controller->bus_lock_mutex);
4592 
4593 	return ret;
4594 }
4595 EXPORT_SYMBOL_GPL(spi_sync);
4596 
4597 /**
4598  * spi_sync_locked - version of spi_sync with exclusive bus usage
4599  * @spi: device with which data will be exchanged
4600  * @message: describes the data transfers
4601  * Context: can sleep
4602  *
4603  * This call may only be used from a context that may sleep. The sleep
4604  * is non-interruptible, and has no timeout. Low-overhead controller
4605  * drivers may DMA directly into and out of the message buffers.
4606  *
4607  * This call should be used by drivers that require exclusive access to the
4608  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4609  * be released by a spi_bus_unlock call when the exclusive access is over.
4610  *
4611  * Return: zero on success, else a negative error code.
4612  */
4613 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4614 {
4615 	return __spi_sync(spi, message);
4616 }
4617 EXPORT_SYMBOL_GPL(spi_sync_locked);
4618 
4619 /**
4620  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4621  * @ctlr: SPI bus master that should be locked for exclusive bus access
4622  * Context: can sleep
4623  *
4624  * This call may only be used from a context that may sleep. The sleep
4625  * is non-interruptible, and has no timeout.
4626  *
4627  * This call should be used by drivers that require exclusive access to the
4628  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4629  * exclusive access is over. Data transfer must be done by spi_sync_locked
4630  * and spi_async_locked calls when the SPI bus lock is held.
4631  *
4632  * Return: always zero.
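 *
 * Typical pattern (an illustrative sketch):
 *
 *	spi_bus_lock(ctlr);
 *	spi_sync_locked(spi, &msg1);
 *	spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(ctlr);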
4633  */
4634 int spi_bus_lock(struct spi_controller *ctlr)
4635 {
4636 	unsigned long flags;
4637 
4638 	mutex_lock(&ctlr->bus_lock_mutex);
4639 
4640 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4641 	ctlr->bus_lock_flag = 1;
4642 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4643 
4644 	/* Mutex remains locked until spi_bus_unlock() is called */
4645 
4646 	return 0;
4647 }
4648 EXPORT_SYMBOL_GPL(spi_bus_lock);
4649 
4650 /**
4651  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4652  * @ctlr: SPI bus master that was locked for exclusive bus access
4653  * Context: can sleep
4654  *
4655  * This call may only be used from a context that may sleep. The sleep
4656  * is non-interruptible, and has no timeout.
4657  *
4658  * This call releases an SPI bus lock previously obtained by a spi_bus_lock
4659  * call.
4660  *
4661  * Return: always zero.
4662  */
4663 int spi_bus_unlock(struct spi_controller *ctlr)
4664 {
4665 	ctlr->bus_lock_flag = 0;
4666 
4667 	mutex_unlock(&ctlr->bus_lock_mutex);
4668 
4669 	return 0;
4670 }
4671 EXPORT_SYMBOL_GPL(spi_bus_unlock);
4672 
4673 /* Portable code must never pass more than 32 bytes */
4674 #define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4675 
4676 static u8	*buf;
4677 
4678 /**
4679  * spi_write_then_read - SPI synchronous write followed by read
4680  * @spi: device with which data will be exchanged
4681  * @txbuf: data to be written (need not be DMA-safe)
4682  * @n_tx: size of txbuf, in bytes
4683  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4684  * @n_rx: size of rxbuf, in bytes
4685  * Context: can sleep
4686  *
4687  * This performs a half-duplex MicroWire style transaction with the
4688  * device, sending txbuf and then reading rxbuf. The return value
4689  * is zero for success, else a negative errno status code.
4690  * This call may only be used from a context that may sleep.
4691  *
4692  * Parameters to this routine are always copied using a small buffer.
4693  * Performance-sensitive or bulk transfer code should instead use
4694  * spi_{async,sync}() calls with DMA-safe buffers.
4695  *
4696  * Return: zero on success, else a negative error code.
4697  */
4698 int spi_write_then_read(struct spi_device *spi,
4699 			const void *txbuf, unsigned n_tx,
4700 			void *rxbuf, unsigned n_rx)
4701 {
4702 	static DEFINE_MUTEX(lock);
4703 
4704 	int status;
4705 	struct spi_message message;
4706 	struct spi_transfer x[2];
4707 	u8 *local_buf;
4708 
4709 	/*
4710 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4711 	 * copying here (this is purely a convenience), but we can keep
4712 	 * heap costs out of the hot path unless someone else is using
4713 	 * the preallocated buffer or the transfer is too large.
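	 *
	 * For example (illustrative): a 1-byte command plus a 3-byte
	 * response fits within SPI_BUFSIZ and takes the trylock fast path,
	 * while a 4 KiB transfer falls back to kmalloc().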
4714 	 */
4715 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4716 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4717 				    GFP_KERNEL | GFP_DMA);
4718 		if (!local_buf)
4719 			return -ENOMEM;
4720 	} else {
4721 		local_buf = buf;
4722 	}
4723 
4724 	spi_message_init(&message);
4725 	memset(x, 0, sizeof(x));
4726 	if (n_tx) {
4727 		x[0].len = n_tx;
4728 		spi_message_add_tail(&x[0], &message);
4729 	}
4730 	if (n_rx) {
4731 		x[1].len = n_rx;
4732 		spi_message_add_tail(&x[1], &message);
4733 	}
4734 
4735 	memcpy(local_buf, txbuf, n_tx);
4736 	x[0].tx_buf = local_buf;
4737 	x[1].rx_buf = local_buf + n_tx;
4738 
4739 	/* Do the I/O */
4740 	status = spi_sync(spi, &message);
4741 	if (status == 0)
4742 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4743 
4744 	if (x[0].tx_buf == buf)
4745 		mutex_unlock(&lock);
4746 	else
4747 		kfree(local_buf);
4748 
4749 	return status;
4750 }
4751 EXPORT_SYMBOL_GPL(spi_write_then_read);
4752 
4753 /*-------------------------------------------------------------------------*/
4754 
4755 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4756 /* Must call put_device() when done with the returned spi_device */
4757 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4758 {
4759 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4760 
4761 	return dev ? to_spi_device(dev) : NULL;
4762 }
4763 
4764 /* SPI controllers are not on the SPI bus itself, so we look them up via the device class */
4765 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4766 {
4767 	struct device *dev;
4768 
4769 	dev = class_find_device_by_of_node(&spi_master_class, node);
4770 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4771 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4772 	if (!dev)
4773 		return NULL;
4774 
4775 	/* Reference taken in class_find_device() */
4776 	return container_of(dev, struct spi_controller, dev);
4777 }
4778 
4779 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4780 			 void *arg)
4781 {
4782 	struct of_reconfig_data *rd = arg;
4783 	struct spi_controller *ctlr;
4784 	struct spi_device *spi;
4785 
4786 	switch (of_reconfig_get_state_change(action, arg)) {
4787 	case OF_RECONFIG_CHANGE_ADD:
4788 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4789 		if (ctlr == NULL)
4790 			return NOTIFY_OK;	/* Not for us */
4791 
4792 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4793 			put_device(&ctlr->dev);
4794 			return NOTIFY_OK;
4795 		}
4796 
4797 		/*
4798 		 * Clear the flag before adding the device so that fw_devlink
4799 		 * doesn't skip adding consumers to this device.
4800 		 */
4801 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4802 		spi = of_register_spi_device(ctlr, rd->dn);
4803 		put_device(&ctlr->dev);
4804 
4805 		if (IS_ERR(spi)) {
4806 			pr_err("%s: failed to create for '%pOF'\n",
4807 			       __func__, rd->dn);
4808 			of_node_clear_flag(rd->dn, OF_POPULATED);
4809 			return notifier_from_errno(PTR_ERR(spi));
4810 		}
4811 		break;
4812 
4813 	case OF_RECONFIG_CHANGE_REMOVE:
4814 		/* Already depopulated? */
4815 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4816 			return NOTIFY_OK;
4817 
4818 		/* Find our device by node */
4819 		spi = of_find_spi_device_by_node(rd->dn);
4820 		if (spi == NULL)
4821 			return NOTIFY_OK;	/* No?
not meant for us */
4822 
4823 		/* Unregister takes one ref away */
4824 		spi_unregister_device(spi);
4825 
4826 		/* And drop the reference taken by the find above */
4827 		put_device(&spi->dev);
4828 		break;
4829 	}
4830 
4831 	return NOTIFY_OK;
4832 }
4833 
4834 static struct notifier_block spi_of_notifier = {
4835 	.notifier_call = of_spi_notify,
4836 };
4837 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4838 extern struct notifier_block spi_of_notifier;
4839 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4840 
4841 #if IS_ENABLED(CONFIG_ACPI)
4842 static int spi_acpi_controller_match(struct device *dev, const void *data)
4843 {
4844 	return ACPI_COMPANION(dev->parent) == data;
4845 }
4846 
4847 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4848 {
4849 	struct device *dev;
4850 
4851 	dev = class_find_device(&spi_master_class, NULL, adev,
4852 				spi_acpi_controller_match);
4853 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4854 		dev = class_find_device(&spi_slave_class, NULL, adev,
4855 					spi_acpi_controller_match);
4856 	if (!dev)
4857 		return NULL;
4858 
4859 	return container_of(dev, struct spi_controller, dev);
4860 }
4861 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4862 
4863 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4864 {
4865 	struct device *dev;
4866 
4867 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4868 	return to_spi_device(dev);
4869 }
4870 
4871 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4872 			   void *arg)
4873 {
4874 	struct acpi_device *adev = arg;
4875 	struct spi_controller *ctlr;
4876 	struct spi_device *spi;
4877 
4878 	switch (value) {
4879 	case ACPI_RECONFIG_DEVICE_ADD:
4880 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4881 		if (!ctlr)
4882 			break;
4883 
4884 		acpi_register_spi_device(ctlr, adev);
4885 		put_device(&ctlr->dev);
4886 		break;
4887 	case ACPI_RECONFIG_DEVICE_REMOVE:
4888 		if (!acpi_device_enumerated(adev))
4889 			break;
4890 
4891 		spi = acpi_spi_find_device_by_adev(adev);
4892 		if (!spi)
4893 			break;
4894 
4895 		spi_unregister_device(spi);
4896 		put_device(&spi->dev);
4897 		break;
4898 	}
4899 
4900 	return NOTIFY_OK;
4901 }
4902 
4903 static struct notifier_block spi_acpi_notifier = {
4904 	.notifier_call = acpi_spi_notify,
4905 };
4906 #else
4907 extern struct notifier_block spi_acpi_notifier;
4908 #endif
4909 
4910 static int __init spi_init(void)
4911 {
4912 	int status;
4913 
4914 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4915 	if (!buf) {
4916 		status = -ENOMEM;
4917 		goto err0;
4918 	}
4919 
4920 	status = bus_register(&spi_bus_type);
4921 	if (status < 0)
4922 		goto err1;
4923 
4924 	status = class_register(&spi_master_class);
4925 	if (status < 0)
4926 		goto err2;
4927 
4928 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4929 		status = class_register(&spi_slave_class);
4930 		if (status < 0)
4931 			goto err3;
4932 	}
4933 
4934 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4935 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4936 	if (IS_ENABLED(CONFIG_ACPI))
4937 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4938 
4939 	return 0;
4940 
4941 err3:
4942 	class_unregister(&spi_master_class);
4943 err2:
4944 	bus_unregister(&spi_bus_type);
4945 err1:
4946 	kfree(buf);
4947 	buf = NULL;
4948 err0:
4949 	return status;
4950 }
4951 
4952 /*
4953  * A board_info is normally registered in arch_initcall(),
4954  * but even essential drivers wait till later.
4955  *
4956  * REVISIT only boardinfo really needs static linking. The rest (device and
4957  * driver registration) _could_ be dynamically linked (modular) ...
Costs
4958  * include needing to have boardinfo data structures be much more public.
4959  */
4960 postcore_initcall(spi_init);
4961 
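/*
 * Example (an illustrative sketch, not part of this file): a peripheral
 * driver reading a 3-byte ID after a one-byte command via
 * spi_write_then_read(); the 0x9f command value is hypothetical and
 * device-specific.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *	int ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */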