/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may clean up for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = container_of(dev,			\
					      struct spi_device, dev);	\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_SHOW_NAME is then wired up via SPI_STATISTICS_ATTRS:	\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
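/*
 * For orientation, SPI_STATISTICS_SHOW(messages, "%lu") below expands
 * (via SPI_STATISTICS_SHOW_NAME and SPI_STATISTICS_ATTRS) to roughly:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *						    char *buf)
 *	{
 *		unsigned long flags;
 *		ssize_t len;
 *		spin_lock_irqsave(&stat->lock, flags);
 *		len = sprintf(buf, "%lu", stat->messages);
 *		spin_unlock_irqrestore(&stat->lock, flags);
 *		return len;
 *	}
 *
 * plus a "messages" device attribute and show wrapper for both the
 * spi_master and the spi_device statistics.  The same pattern repeats
 * for every counter declared below.
 */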
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
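/*
 * These groups surface the counters in sysfs; the master-side twin is
 * set up just below.  Illustrative paths for a device "spi0.0" on bus
 * "spi0":
 *
 *	/sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 *	/sys/class/spi_master/spi0/statistics/transfers
 *
 * Each file is read-only (S_IRUGO) and prints a single counter.
 */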
static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
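/*
 * Worked example for the histogram bucketing above: a 100 byte transfer
 * has fls(100) == 7, so l2len = 6 and the transfer is counted in
 * transfer_bytes_histo[6], i.e. the "64-127" bucket exposed in sysfs.
 * A zero-length transfer gives fls(0) - 1 == -1, which is clamped to
 * bucket 0 ("0-1").
 */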
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/
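/*
 * A minimal protocol driver against the bus glue above might look like
 * this (sketch only; the "foo" chip and its callbacks are hypothetical):
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver		= { .name = "foo" },
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * spi_match_device() binds it by OF match first, then ACPI, then the
 * id_table, and finally by comparing modalias against driver.name.
 */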
/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into a file like
 * arch/.../mach-.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
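/*
 * Typical use of the two-step interface (sketch; "foo" and the adapter
 * context are hypothetical, e.g. code that learned about a chip
 * out-of-band):
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	int status;
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 10000000;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	status = spi_add_device(spi);
 *	if (status < 0)
 *		spi_dev_put(spi);	// discard without adding
 */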
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
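/*
 * Example (sketch; "foo" is hypothetical): an expansion-board or
 * adapter driver that discovered a chip at runtime could instantiate
 * it directly:
 *
 *	static struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 5000000,
 *		.chip_select	= 2,
 *		.mode		= SPI_MODE_3,
 *	};
 *
 *	struct spi_device *dev = spi_new_device(master, &chip);
 *	if (!dev)
 *		return -ENODEV;
 *
 * Hard-wired mainboard devices should instead be declared through
 * spi_register_board_info() below.
 */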
static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}
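/*
 * Chip select polarity, for reference: with enable == true on a default
 * active-low device, the GPIO is driven to !enable == 0, i.e. asserted
 * low.  If the device set SPI_CS_HIGH, enable is inverted first, so the
 * same call drives the line high instead.  Controllers without a CS
 * GPIO receive the same (possibly inverted) level via their set_cs()
 * hook.
 */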
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = PAGE_SIZE;
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = master->max_dma_len;
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
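/*
 * Controller drivers opt in to the mapping above by providing a
 * can_dma() callback, roughly like this (sketch; the foo_* names and
 * the 32 byte threshold are hypothetical):
 *
 *	static bool foo_can_dma(struct spi_master *master,
 *				struct spi_device *spi,
 *				struct spi_transfer *xfer)
 *	{
 *		return xfer->len > 32;	// PIO for short transfers
 *	}
 *
 *	master->can_dma = foo_can_dma;
 *	master->dma_tx = dma_request_slave_channel(dev, "tx");
 *	master->dma_rx = dma_request_slave_channel(dev, "rx");
 *
 * When can_dma() returns true for a transfer, __spi_map_msg() maps its
 * buffers for the DMA channels' devices (or for the master itself when
 * no channel was provided).
 */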
static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
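/*
 * Controllers that always shift data in both directions set
 * SPI_MASTER_MUST_TX and/or SPI_MASTER_MUST_RX, e.g. (sketch):
 *
 *	master->flags = SPI_MASTER_MUST_TX;
 *
 * For such masters, spi_map_msg() above substitutes a zeroed dummy_tx
 * buffer for every transfer that has no tx_buf (and likewise dummy_rx),
 * sized to the largest such transfer in the message; spi_unmap_msg()
 * restores the NULL pointers afterwards.
 */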
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
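/*
 * Typical interrupt-driven flow for a transfer_one() implementation
 * (sketch; the foo_* names are hypothetical):
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw(master, xfer);
 *		return 1;	// still busy: the core waits with a timeout
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		foo_ack_irq(dev_id);
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 *
 * The timeout budget computed in spi_transfer_one_message() above is
 * the nominal transfer time at xfer->speed_hz, doubled, plus 100 ms of
 * slack.
 */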
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message at the front of
 * the queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}
static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued on the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
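/*
 * Example of a child node the parser above accepts (sketch; see the
 * bindings in Documentation/devicetree/bindings/spi/spi-bus.txt):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;			// chip select 0
 *		spi-max-frequency = <20000000>;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;		// SPI_TX_QUAD
 *	};
 */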
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of the master node which has
 * a 'reg' property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
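/*
 * The cs-gpios parsing above corresponds to a controller node property
 * such as (sketch):
 *
 *	cs-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>, <0>, <&gpio1 2 GPIO_ACTIVE_LOW>;
 *
 * Per the spi-bus binding, a <0> hole leaves that entry at -ENOENT, so
 * spi_set_cs() falls back to the controller's native set_cs() for that
 * chip select while the others are driven as GPIOs.
 */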
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}
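/*
 * Shape of a controller driver's probe() against this API (sketch; the
 * foo_* names and the platform device context are hypothetical):
 *
 *	struct spi_master *master;
 *	int ret;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->transfer_one = foo_transfer_one;
 *	master->dev.of_node = pdev->dev.of_node;
 *
 *	ret = spi_register_master(master);
 *	if (ret)
 *		spi_master_put(master);	// registration failed: drop ref
 *	return ret;
 *
 * devm_spi_register_master() below removes the need for an explicit
 * spi_unregister_master() in remove().
 */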
/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:	device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register an SPI master as with spi_register_master(); the master will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
        if (master->bits_per_word_mask) {
                /* Only 32 bits fit in the mask */
                if (bits_per_word > 32)
                        return -EINVAL;
                if (!(master->bits_per_word_mask &
                                SPI_BPW_MASK(bits_per_word)))
                        return -EINVAL;
        }

        return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
        unsigned bad_bits, ugly_bits;
        int status;

        /* Check mode to prevent DUAL and QUAD from being set at the
         * same time.
         */
        if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
                ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
                dev_err(&spi->dev,
                "setup: cannot select dual and quad at the same time\n");
                return -EINVAL;
        }
        /* In SPI_3WIRE mode, DUAL and QUAD are forbidden.
         */
        if ((spi->mode & SPI_3WIRE) && (spi->mode &
                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
                return -EINVAL;
        /* Help drivers fail *cleanly* when they need options
         * that aren't supported with their current master.
         */
        bad_bits = spi->mode & ~spi->master->mode_bits;
        ugly_bits = bad_bits &
                (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
        if (ugly_bits) {
                dev_warn(&spi->dev,
                         "setup: ignoring unsupported mode bits %x\n",
                         ugly_bits);
                spi->mode &= ~ugly_bits;
                bad_bits &= ~ugly_bits;
        }
        if (bad_bits) {
                dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
                return -EINVAL;
        }

        if (!spi->bits_per_word)
                spi->bits_per_word = 8;

        status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
        if (status)
                return status;

        if (!spi->max_speed_hz)
                spi->max_speed_hz = spi->master->max_speed_hz;

        if (spi->master->setup)
                status = spi->master->setup(spi);

        spi_set_cs(spi, false);

        dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
                        (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
                        (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
                        (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
                        (spi->mode & SPI_3WIRE) ? "3wire, " : "",
                        (spi->mode & SPI_LOOP) ? "loopback, " : "",
                        spi->bits_per_word, spi->max_speed_hz,
                        status);

        return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
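
/*
 * Example: a protocol driver overriding a slave's defaults before its
 * first I/O.  A minimal sketch; the mode, word size, and clock rate are
 * arbitrary values a real driver would take from the chip's datasheet,
 * and foo_init_device() is a hypothetical name.
 *
 *	static int foo_init_device(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 *
 * If the master cannot honour one of these options (say, 16-bit words),
 * spi_setup() fails cleanly instead of producing corrupt transfers.
 */
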
"loopback, " : "", 2092 spi->bits_per_word, spi->max_speed_hz, 2093 status); 2094 2095 return status; 2096 } 2097 EXPORT_SYMBOL_GPL(spi_setup); 2098 2099 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2100 { 2101 struct spi_master *master = spi->master; 2102 struct spi_transfer *xfer; 2103 int w_size; 2104 2105 if (list_empty(&message->transfers)) 2106 return -EINVAL; 2107 2108 /* Half-duplex links include original MicroWire, and ones with 2109 * only one data pin like SPI_3WIRE (switches direction) or where 2110 * either MOSI or MISO is missing. They can also be caused by 2111 * software limitations. 2112 */ 2113 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2114 || (spi->mode & SPI_3WIRE)) { 2115 unsigned flags = master->flags; 2116 2117 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2118 if (xfer->rx_buf && xfer->tx_buf) 2119 return -EINVAL; 2120 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2121 return -EINVAL; 2122 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2123 return -EINVAL; 2124 } 2125 } 2126 2127 /** 2128 * Set transfer bits_per_word and max speed as spi device default if 2129 * it is not set for this transfer. 2130 * Set transfer tx_nbits and rx_nbits as single transfer default 2131 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2132 */ 2133 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2134 message->frame_length += xfer->len; 2135 if (!xfer->bits_per_word) 2136 xfer->bits_per_word = spi->bits_per_word; 2137 2138 if (!xfer->speed_hz) 2139 xfer->speed_hz = spi->max_speed_hz; 2140 if (!xfer->speed_hz) 2141 xfer->speed_hz = master->max_speed_hz; 2142 2143 if (master->max_speed_hz && 2144 xfer->speed_hz > master->max_speed_hz) 2145 xfer->speed_hz = master->max_speed_hz; 2146 2147 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2148 return -EINVAL; 2149 2150 /* 2151 * SPI transfer length should be multiple of SPI word size 2152 * where SPI word size should be power-of-two multiple 2153 */ 2154 if (xfer->bits_per_word <= 8) 2155 w_size = 1; 2156 else if (xfer->bits_per_word <= 16) 2157 w_size = 2; 2158 else 2159 w_size = 4; 2160 2161 /* No partial transfers accepted */ 2162 if (xfer->len % w_size) 2163 return -EINVAL; 2164 2165 if (xfer->speed_hz && master->min_speed_hz && 2166 xfer->speed_hz < master->min_speed_hz) 2167 return -EINVAL; 2168 2169 if (xfer->tx_buf && !xfer->tx_nbits) 2170 xfer->tx_nbits = SPI_NBITS_SINGLE; 2171 if (xfer->rx_buf && !xfer->rx_nbits) 2172 xfer->rx_nbits = SPI_NBITS_SINGLE; 2173 /* check transfer tx/rx_nbits: 2174 * 1. check the value matches one of single, dual and quad 2175 * 2. 
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
        struct spi_master *master = spi->master;

        message->spi = spi;

        SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

        trace_spi_message_submit(message);

        return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used from IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
        struct spi_master *master = spi->master;
        int ret;
        unsigned long flags;

        ret = __spi_validate(spi, message);
        if (ret != 0)
                return ret;

        spin_lock_irqsave(&master->bus_lock_spinlock, flags);

        if (master->bus_lock_flag)
                ret = -EBUSY;
        else
                ret = __spi_async(spi, message);

        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
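
/*
 * Example: submitting a message from a context that cannot sleep.  The
 * completion callback also runs in atomic context, so it should only do
 * quick work such as signalling a waiter.  A minimal sketch, assuming
 * the caller keeps the message and buffers alive until the callback
 * fires (foo_msg_complete is a hypothetical name):
 *
 *	static void foo_msg_complete(void *context)
 *	{
 *		struct completion *done = context;
 *
 *		complete(done);
 *	}
 *
 *	...
 *	msg->complete = foo_msg_complete;
 *	msg->context = &done;
 *	status = spi_async(spi, msg);	(returns without waiting)
 */
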
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used from IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
        struct spi_master *master = spi->master;
        int ret;
        unsigned long flags;

        ret = __spi_validate(spi, message);
        if (ret != 0)
                return ret;

        spin_lock_irqsave(&master->bus_lock_spinlock, flags);

        ret = __spi_async(spi, message);

        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
        complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
                      int bus_locked)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int status;
        struct spi_master *master = spi->master;
        unsigned long flags;

        status = __spi_validate(spi, message);
        if (status != 0)
                return status;

        message->complete = spi_complete;
        message->context = &done;
        message->spi = spi;

        SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

        if (!bus_locked)
                mutex_lock(&master->bus_lock_mutex);

        /* If we're not using the legacy transfer method then we will
         * try to transfer in the calling context, so we special-case
         * that path here.  This code would be less tricky if we could
         * remove the support for driver implemented message queues.
         */
        if (master->transfer == spi_queued_transfer) {
                spin_lock_irqsave(&master->bus_lock_spinlock, flags);

                trace_spi_message_submit(message);

                status = __spi_queued_transfer(spi, message, false);

                spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
        } else {
                status = spi_async_locked(spi, message);
        }

        if (!bus_locked)
                mutex_unlock(&master->bus_lock_mutex);

        if (status == 0) {
                /* Push out the messages in the calling context if we
                 * can.
                 */
                if (master->transfer == spi_queued_transfer) {
                        SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
                                                       spi_sync_immediate);
                        SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
                                                       spi_sync_immediate);
                        __spi_pump_messages(master, false);
                }

                wait_for_completion(&done);
                status = message->status;
        }
        message->context = NULL;
        return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
        return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
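
/*
 * Example: a blocking single-transfer write using spi_sync().  A
 * minimal sketch with a hypothetical two-byte command.  Per the
 * guarantees above, the buffers stay valid until the call returns;
 * note that for DMA-capable controllers a heap buffer may be required
 * rather than the stack variables shown here.
 *
 *	u8 cmd[2] = { 0x0a, 0x55 };
 *	struct spi_transfer t = {
 *		.tx_buf	= cmd,
 *		.len	= sizeof(cmd),
 *	};
 *	struct spi_message m;
 *	int status;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);	(0 on success)
 */
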
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
        return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
        unsigned long flags;

        mutex_lock(&master->bus_lock_mutex);

        spin_lock_irqsave(&master->bus_lock_spinlock, flags);
        master->bus_lock_flag = 1;
        spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

        /* mutex remains locked until spi_bus_unlock is called */

        return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by a spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
        master->bus_lock_flag = 0;

        mutex_unlock(&master->bus_lock_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
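
/*
 * Example: performing two messages back-to-back with no interleaving
 * from other devices on the same bus.  A sketch; setup_msg and data_msg
 * are hypothetical messages, and error handling is elided:
 *
 *	spi_bus_lock(spi->master);
 *	spi_sync_locked(spi, &setup_msg);
 *	spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->master);
 *
 * While the lock is held, spi_async() from other users fails with
 * -EBUSY and plain spi_sync() blocks on the bus mutex, so everything
 * inside the critical section must use the _locked variants.
 */
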
/* portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)

static u8 *buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
                const void *txbuf, unsigned n_tx,
                void *rxbuf, unsigned n_rx)
{
        static DEFINE_MUTEX(lock);

        int status;
        struct spi_message message;
        struct spi_transfer x[2];
        u8 *local_buf;

        /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
         * copying here (this is purely a convenience), but we can keep heap
         * costs out of the hot path unless someone else is using the
         * pre-allocated buffer or the transfer is too large.
         */
        if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
                local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
                                    GFP_KERNEL | GFP_DMA);
                if (!local_buf)
                        return -ENOMEM;
        } else {
                local_buf = buf;
        }

        spi_message_init(&message);
        memset(x, 0, sizeof(x));
        if (n_tx) {
                x[0].len = n_tx;
                spi_message_add_tail(&x[0], &message);
        }
        if (n_rx) {
                x[1].len = n_rx;
                spi_message_add_tail(&x[1], &message);
        }

        memcpy(local_buf, txbuf, n_tx);
        x[0].tx_buf = local_buf;
        x[1].rx_buf = local_buf + n_tx;

        /* do the i/o */
        status = spi_sync(spi, &message);
        if (status == 0)
                memcpy(rxbuf, x[1].rx_buf, n_rx);

        if (x[0].tx_buf == buf)
                mutex_unlock(&lock);
        else
                kfree(local_buf);

        return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
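
/*
 * Example: reading a register of a hypothetical chip whose protocol is
 * "write one command byte, then read two data bytes".  Because the
 * helper bounces through its own DMA-safe buffer, plain stack variables
 * are fine here ("reg" and "result" are assumed to be defined by the
 * caller):
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val[2];
 *	int status;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, val, 2);
 *	if (status == 0)
 *		result = (val[0] << 8) | val[1];
 */
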
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
        return dev->of_node == data;
}

/* must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
        struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
                                                __spi_of_device_match);
        return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
        return dev->of_node == data;
}

/* SPI masters do not sit on the spi_bus, so we look them up another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
        struct device *dev;

        dev = class_find_device(&spi_master_class, NULL, node,
                                __spi_of_master_match);
        if (!dev)
                return NULL;

        /* reference was taken in class_find_device() */
        return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
                         void *arg)
{
        struct of_reconfig_data *rd = arg;
        struct spi_master *master;
        struct spi_device *spi;

        switch (of_reconfig_get_state_change(action, arg)) {
        case OF_RECONFIG_CHANGE_ADD:
                master = of_find_spi_master_by_node(rd->dn->parent);
                if (master == NULL)
                        return NOTIFY_OK;       /* not for us */

                spi = of_register_spi_device(master, rd->dn);
                put_device(&master->dev);

                if (IS_ERR(spi)) {
                        pr_err("%s: failed to create for '%s'\n",
                                        __func__, rd->dn->full_name);
                        return notifier_from_errno(PTR_ERR(spi));
                }
                break;

        case OF_RECONFIG_CHANGE_REMOVE:
                /* find our device by node */
                spi = of_find_spi_device_by_node(rd->dn);
                if (spi == NULL)
                        return NOTIFY_OK;       /* no? not meant for us */

                /* unregister takes one ref away */
                spi_unregister_device(spi);

                /* and drop the reference taken by the find above */
                put_device(&spi->dev);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
        .notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

static int __init spi_init(void)
{
        int status;

        buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
        if (!buf) {
                status = -ENOMEM;
                goto err0;
        }

        status = bus_register(&spi_bus_type);
        if (status < 0)
                goto err1;

        status = class_register(&spi_master_class);
        if (status < 0)
                goto err2;

        if (IS_ENABLED(CONFIG_OF_DYNAMIC))
                WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

        return 0;

err2:
        bus_unregister(&spi_bus_type);
err1:
        kfree(buf);
        buf = NULL;
err0:
        return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);