1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/spi/spi-loopback-test.c 4 * 5 * (c) Martin Sperl <kernel@martin.sperl.org> 6 * 7 * Loopback test driver to test several typical spi_message conditions 8 * that a spi_master driver may encounter 9 * this can also get used for regression testing 10 */ 11 12 #include <linux/delay.h> 13 #include <linux/kernel.h> 14 #include <linux/ktime.h> 15 #include <linux/list.h> 16 #include <linux/list_sort.h> 17 #include <linux/mod_devicetable.h> 18 #include <linux/module.h> 19 #include <linux/printk.h> 20 #include <linux/vmalloc.h> 21 #include <linux/spi/spi.h> 22 23 #include "spi-test.h" 24 25 /* flag to only simulate transfers */ 26 static int simulate_only; 27 module_param(simulate_only, int, 0); 28 MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message"); 29 30 /* dump spi messages */ 31 static int dump_messages; 32 module_param(dump_messages, int, 0); 33 MODULE_PARM_DESC(dump_messages, 34 "=1 dump the basic spi_message_structure, " \ 35 "=2 dump the spi_message_structure including data, " \ 36 "=3 dump the spi_message structure before and after execution"); 37 /* the device is jumpered for loopback - enabling some rx_buf tests */ 38 static int loopback; 39 module_param(loopback, int, 0); 40 MODULE_PARM_DESC(loopback, 41 "if set enable loopback mode, where the rx_buf " \ 42 "is checked to match tx_buf after the spi_message " \ 43 "is executed"); 44 45 static int loop_req; 46 module_param(loop_req, int, 0); 47 MODULE_PARM_DESC(loop_req, 48 "if set controller will be asked to enable test loop mode. " \ 49 "If controller supported it, MISO and MOSI will be connected"); 50 51 static int no_cs; 52 module_param(no_cs, int, 0); 53 MODULE_PARM_DESC(no_cs, 54 "if set Chip Select (CS) will not be used"); 55 56 /* run tests only for a specific length */ 57 static int run_only_iter_len = -1; 58 module_param(run_only_iter_len, int, 0); 59 MODULE_PARM_DESC(run_only_iter_len, 60 "only run tests for a length of this number in iterate_len list"); 61 62 /* run only a specific test */ 63 static int run_only_test = -1; 64 module_param(run_only_test, int, 0); 65 MODULE_PARM_DESC(run_only_test, 66 "only run the test with this number (0-based !)"); 67 68 /* use vmalloc'ed buffers */ 69 static int use_vmalloc; 70 module_param(use_vmalloc, int, 0644); 71 MODULE_PARM_DESC(use_vmalloc, 72 "use vmalloc'ed buffers instead of kmalloc'ed"); 73 74 /* check rx ranges */ 75 static int check_ranges = 1; 76 module_param(check_ranges, int, 0644); 77 MODULE_PARM_DESC(check_ranges, 78 "checks rx_buffer pattern are valid"); 79 80 static unsigned int delay_ms = 100; 81 module_param(delay_ms, uint, 0644); 82 MODULE_PARM_DESC(delay_ms, 83 "delay between tests, in milliseconds (default: 100)"); 84 85 /* the actual tests to execute */ 86 static struct spi_test spi_tests[] = { 87 { 88 .description = "tx/rx-transfer - start of page", 89 .fill_option = FILL_COUNT_8, 90 .iterate_len = { ITERATE_MAX_LEN }, 91 .iterate_tx_align = ITERATE_ALIGN, 92 .iterate_rx_align = ITERATE_ALIGN, 93 .transfer_count = 1, 94 .transfers = { 95 { 96 .tx_buf = TX(0), 97 .rx_buf = RX(0), 98 }, 99 }, 100 }, 101 { 102 .description = "tx/rx-transfer - crossing PAGE_SIZE", 103 .fill_option = FILL_COUNT_8, 104 .iterate_len = { ITERATE_LEN }, 105 .iterate_tx_align = ITERATE_ALIGN, 106 .iterate_rx_align = ITERATE_ALIGN, 107 .transfer_count = 1, 108 .transfers = { 109 { 110 .tx_buf = TX(PAGE_SIZE - 4), 111 .rx_buf = RX(PAGE_SIZE - 4), 112 }, 113 }, 114 }, 115 { 116 .description = "tx-transfer - only", 117 .fill_option = FILL_COUNT_8, 118 .iterate_len = { ITERATE_MAX_LEN }, 119 .iterate_tx_align = ITERATE_ALIGN, 120 .transfer_count = 1, 121 .transfers = { 122 { 123 .tx_buf = TX(0), 124 }, 125 }, 126 }, 127 { 128 .description = "rx-transfer - only", 129 .fill_option = FILL_COUNT_8, 130 .iterate_len = { ITERATE_MAX_LEN }, 131 .iterate_rx_align = ITERATE_ALIGN, 132 .transfer_count = 1, 133 .transfers = { 134 { 135 .rx_buf = RX(0), 136 }, 137 }, 138 }, 139 { 140 .description = "two tx-transfers - alter both", 141 .fill_option = FILL_COUNT_8, 142 .iterate_len = { ITERATE_LEN }, 143 .iterate_tx_align = ITERATE_ALIGN, 144 .iterate_transfer_mask = BIT(0) | BIT(1), 145 .transfer_count = 2, 146 .transfers = { 147 { 148 .tx_buf = TX(0), 149 }, 150 { 151 /* this is why we cant use ITERATE_MAX_LEN */ 152 .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF), 153 }, 154 }, 155 }, 156 { 157 .description = "two tx-transfers - alter first", 158 .fill_option = FILL_COUNT_8, 159 .iterate_len = { ITERATE_MAX_LEN }, 160 .iterate_tx_align = ITERATE_ALIGN, 161 .iterate_transfer_mask = BIT(0), 162 .transfer_count = 2, 163 .transfers = { 164 { 165 .tx_buf = TX(64), 166 }, 167 { 168 .len = 1, 169 .tx_buf = TX(0), 170 }, 171 }, 172 }, 173 { 174 .description = "two tx-transfers - alter second", 175 .fill_option = FILL_COUNT_8, 176 .iterate_len = { ITERATE_MAX_LEN }, 177 .iterate_tx_align = ITERATE_ALIGN, 178 .iterate_transfer_mask = BIT(1), 179 .transfer_count = 2, 180 .transfers = { 181 { 182 .len = 16, 183 .tx_buf = TX(0), 184 }, 185 { 186 .tx_buf = TX(64), 187 }, 188 }, 189 }, 190 { 191 .description = "two transfers tx then rx - alter both", 192 .fill_option = FILL_COUNT_8, 193 .iterate_len = { ITERATE_MAX_LEN }, 194 .iterate_tx_align = ITERATE_ALIGN, 195 .iterate_transfer_mask = BIT(0) | BIT(1), 196 .transfer_count = 2, 197 .transfers = { 198 { 199 .tx_buf = TX(0), 200 }, 201 { 202 .rx_buf = RX(0), 203 }, 204 }, 205 }, 206 { 207 .description = "two transfers tx then rx - alter tx", 208 .fill_option = FILL_COUNT_8, 209 .iterate_len = { ITERATE_MAX_LEN }, 210 .iterate_tx_align = ITERATE_ALIGN, 211 .iterate_transfer_mask = BIT(0), 212 .transfer_count = 2, 213 .transfers = { 214 { 215 .tx_buf = TX(0), 216 }, 217 { 218 .len = 1, 219 .rx_buf = RX(0), 220 }, 221 }, 222 }, 223 { 224 .description = "two transfers tx then rx - alter rx", 225 .fill_option = FILL_COUNT_8, 226 .iterate_len = { ITERATE_MAX_LEN }, 227 .iterate_tx_align = ITERATE_ALIGN, 228 .iterate_transfer_mask = BIT(1), 229 .transfer_count = 2, 230 .transfers = { 231 { 232 .len = 1, 233 .tx_buf = TX(0), 234 }, 235 { 236 .rx_buf = RX(0), 237 }, 238 }, 239 }, 240 { 241 .description = "two tx+rx transfers - alter both", 242 .fill_option = FILL_COUNT_8, 243 .iterate_len = { ITERATE_LEN }, 244 .iterate_tx_align = ITERATE_ALIGN, 245 .iterate_transfer_mask = BIT(0) | BIT(1), 246 .transfer_count = 2, 247 .transfers = { 248 { 249 .tx_buf = TX(0), 250 .rx_buf = RX(0), 251 }, 252 { 253 /* making sure we align without overwrite 254 * the reason we can not use ITERATE_MAX_LEN 255 */ 256 .tx_buf = TX(SPI_TEST_MAX_SIZE_HALF), 257 .rx_buf = RX(SPI_TEST_MAX_SIZE_HALF), 258 }, 259 }, 260 }, 261 { 262 .description = "two tx+rx transfers - alter first", 263 .fill_option = FILL_COUNT_8, 264 .iterate_len = { ITERATE_MAX_LEN }, 265 .iterate_tx_align = ITERATE_ALIGN, 266 .iterate_transfer_mask = BIT(0), 267 .transfer_count = 2, 268 .transfers = { 269 { 270 /* making sure we align without overwrite */ 271 .tx_buf = TX(1024), 272 .rx_buf = RX(1024), 273 }, 274 { 275 .len = 1, 276 /* making sure we align without overwrite */ 277 .tx_buf = TX(0), 278 .rx_buf = RX(0), 279 }, 280 }, 281 }, 282 { 283 .description = "two tx+rx transfers - alter second", 284 .fill_option = FILL_COUNT_8, 285 .iterate_len = { ITERATE_MAX_LEN }, 286 .iterate_tx_align = ITERATE_ALIGN, 287 .iterate_transfer_mask = BIT(1), 288 .transfer_count = 2, 289 .transfers = { 290 { 291 .len = 1, 292 .tx_buf = TX(0), 293 .rx_buf = RX(0), 294 }, 295 { 296 /* making sure we align without overwrite */ 297 .tx_buf = TX(1024), 298 .rx_buf = RX(1024), 299 }, 300 }, 301 }, 302 { 303 .description = "two tx+rx transfers - delay after transfer", 304 .fill_option = FILL_COUNT_8, 305 .iterate_len = { ITERATE_MAX_LEN }, 306 .iterate_transfer_mask = BIT(0) | BIT(1), 307 .transfer_count = 2, 308 .transfers = { 309 { 310 .tx_buf = TX(0), 311 .rx_buf = RX(0), 312 .delay = { 313 .value = 1000, 314 .unit = SPI_DELAY_UNIT_USECS, 315 }, 316 }, 317 { 318 .tx_buf = TX(0), 319 .rx_buf = RX(0), 320 .delay = { 321 .value = 1000, 322 .unit = SPI_DELAY_UNIT_USECS, 323 }, 324 }, 325 }, 326 }, 327 { 328 .description = "three tx+rx transfers with overlapping cache lines", 329 .fill_option = FILL_COUNT_8, 330 /* 331 * This should be large enough for the controller driver to 332 * choose to transfer it with DMA. 333 */ 334 .iterate_len = { 512, -1 }, 335 .iterate_transfer_mask = BIT(1), 336 .transfer_count = 3, 337 .transfers = { 338 { 339 .len = 1, 340 .tx_buf = TX(0), 341 .rx_buf = RX(0), 342 }, 343 { 344 .tx_buf = TX(1), 345 .rx_buf = RX(1), 346 }, 347 { 348 .len = 1, 349 .tx_buf = TX(513), 350 .rx_buf = RX(513), 351 }, 352 }, 353 }, 354 355 { /* end of tests sequence */ } 356 }; 357 358 static int spi_loopback_test_probe(struct spi_device *spi) 359 { 360 int ret; 361 362 if (loop_req || no_cs) { 363 spi->mode |= loop_req ? SPI_LOOP : 0; 364 spi->mode |= no_cs ? SPI_NO_CS : 0; 365 ret = spi_setup(spi); 366 if (ret) { 367 dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n", 368 ret); 369 return ret; 370 } 371 } 372 373 dev_info(&spi->dev, "Executing spi-loopback-tests\n"); 374 375 ret = spi_test_run_tests(spi, spi_tests); 376 377 dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n", 378 ret); 379 380 return ret; 381 } 382 383 /* non const match table to permit to change via a module parameter */ 384 static struct of_device_id spi_loopback_test_of_match[] = { 385 { .compatible = "linux,spi-loopback-test", }, 386 { } 387 }; 388 389 /* allow to override the compatible string via a module_parameter */ 390 module_param_string(compatible, spi_loopback_test_of_match[0].compatible, 391 sizeof(spi_loopback_test_of_match[0].compatible), 392 0000); 393 394 MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match); 395 396 static struct spi_driver spi_loopback_test_driver = { 397 .driver = { 398 .name = "spi-loopback-test", 399 .of_match_table = spi_loopback_test_of_match, 400 }, 401 .probe = spi_loopback_test_probe, 402 }; 403 404 module_spi_driver(spi_loopback_test_driver); 405 406 MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>"); 407 MODULE_DESCRIPTION("test spi_driver to check core functionality"); 408 MODULE_LICENSE("GPL"); 409 410 /*-------------------------------------------------------------------------*/ 411 412 /* spi_test implementation */ 413 414 #define RANGE_CHECK(ptr, plen, start, slen) \ 415 ((ptr >= start) && (ptr + plen <= start + slen)) 416 417 /* we allocate one page more, to allow for offsets */ 418 #define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE) 419 420 static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) 421 { 422 /* limit the hex_dump */ 423 if (len < 1024) { 424 print_hex_dump(KERN_INFO, pre, 425 DUMP_PREFIX_OFFSET, 16, 1, 426 ptr, len, 0); 427 return; 428 } 429 /* print head */ 430 print_hex_dump(KERN_INFO, pre, 431 DUMP_PREFIX_OFFSET, 16, 1, 432 ptr, 512, 0); 433 /* print tail */ 434 pr_info("%s truncated - continuing at offset %04zx\n", 435 pre, len - 512); 436 print_hex_dump(KERN_INFO, pre, 437 DUMP_PREFIX_OFFSET, 16, 1, 438 ptr + (len - 512), 512, 0); 439 } 440 441 static void spi_test_dump_message(struct spi_device *spi, 442 struct spi_message *msg, 443 bool dump_data) 444 { 445 struct spi_transfer *xfer; 446 int i; 447 u8 b; 448 449 dev_info(&spi->dev, " spi_msg@%pK\n", msg); 450 if (msg->status) 451 dev_info(&spi->dev, " status: %i\n", 452 msg->status); 453 dev_info(&spi->dev, " frame_length: %i\n", 454 msg->frame_length); 455 dev_info(&spi->dev, " actual_length: %i\n", 456 msg->actual_length); 457 458 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 459 dev_info(&spi->dev, " spi_transfer@%pK\n", xfer); 460 dev_info(&spi->dev, " len: %i\n", xfer->len); 461 dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf); 462 if (dump_data && xfer->tx_buf) 463 spi_test_print_hex_dump(" TX: ", 464 xfer->tx_buf, 465 xfer->len); 466 467 dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf); 468 if (dump_data && xfer->rx_buf) 469 spi_test_print_hex_dump(" RX: ", 470 xfer->rx_buf, 471 xfer->len); 472 /* check for unwritten test pattern on rx_buf */ 473 if (xfer->rx_buf) { 474 for (i = 0 ; i < xfer->len ; i++) { 475 b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i]; 476 if (b != SPI_TEST_PATTERN_UNWRITTEN) 477 break; 478 } 479 if (i) 480 dev_info(&spi->dev, 481 " rx_buf filled with %02x starts at offset: %i\n", 482 SPI_TEST_PATTERN_UNWRITTEN, 483 xfer->len - i); 484 } 485 } 486 } 487 488 struct rx_ranges { 489 struct list_head list; 490 u8 *start; 491 u8 *end; 492 }; 493 494 static int rx_ranges_cmp(void *priv, const struct list_head *a, 495 const struct list_head *b) 496 { 497 struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); 498 struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); 499 500 if (rx_a->start > rx_b->start) 501 return 1; 502 if (rx_a->start < rx_b->start) 503 return -1; 504 return 0; 505 } 506 507 static int spi_check_rx_ranges(struct spi_device *spi, 508 struct spi_message *msg, 509 void *rx) 510 { 511 struct spi_transfer *xfer; 512 struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r; 513 int i = 0; 514 LIST_HEAD(ranges_list); 515 u8 *addr; 516 int ret = 0; 517 518 /* loop over all transfers to fill in the rx_ranges */ 519 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 520 /* if there is no rx, then no check is needed */ 521 if (!xfer->rx_buf) 522 continue; 523 /* fill in the rx_range */ 524 if (RANGE_CHECK(xfer->rx_buf, xfer->len, 525 rx, SPI_TEST_MAX_SIZE_PLUS)) { 526 ranges[i].start = xfer->rx_buf; 527 ranges[i].end = xfer->rx_buf + xfer->len; 528 list_add(&ranges[i].list, &ranges_list); 529 i++; 530 } 531 } 532 533 /* if no ranges, then we can return and avoid the checks...*/ 534 if (!i) 535 return 0; 536 537 /* sort the list */ 538 list_sort(NULL, &ranges_list, rx_ranges_cmp); 539 540 /* and iterate over all the rx addresses */ 541 for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) { 542 /* if we are the DO not write pattern, 543 * then continue with the loop... 544 */ 545 if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE) 546 continue; 547 548 /* check if we are inside a range */ 549 list_for_each_entry(r, &ranges_list, list) { 550 /* if so then set to end... */ 551 if ((addr >= r->start) && (addr < r->end)) 552 addr = r->end; 553 } 554 /* second test after a (hopefull) translation */ 555 if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE) 556 continue; 557 558 /* if still not found then something has modified too much */ 559 /* we could list the "closest" transfer here... */ 560 dev_err(&spi->dev, 561 "loopback strangeness - rx changed outside of allowed range at: %pK\n", 562 addr); 563 /* do not return, only set ret, 564 * so that we list all addresses 565 */ 566 ret = -ERANGE; 567 } 568 569 return ret; 570 } 571 572 static int spi_test_check_elapsed_time(struct spi_device *spi, 573 struct spi_test *test) 574 { 575 int i; 576 unsigned long long estimated_time = 0; 577 unsigned long long delay_usecs = 0; 578 579 for (i = 0; i < test->transfer_count; i++) { 580 struct spi_transfer *xfer = test->transfers + i; 581 unsigned long long nbits = (unsigned long long)BITS_PER_BYTE * 582 xfer->len; 583 584 delay_usecs += xfer->delay.value; 585 if (!xfer->speed_hz) 586 continue; 587 estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz); 588 } 589 590 estimated_time += delay_usecs * NSEC_PER_USEC; 591 if (test->elapsed_time < estimated_time) { 592 dev_err(&spi->dev, 593 "elapsed time %lld ns is shorter than minimum estimated time %lld ns\n", 594 test->elapsed_time, estimated_time); 595 596 return -EINVAL; 597 } 598 599 return 0; 600 } 601 602 static int spi_test_check_loopback_result(struct spi_device *spi, 603 struct spi_message *msg, 604 void *tx, void *rx) 605 { 606 struct spi_transfer *xfer; 607 u8 rxb, txb; 608 size_t i; 609 int ret; 610 611 /* checks rx_buffer pattern are valid with loopback or without */ 612 if (check_ranges) { 613 ret = spi_check_rx_ranges(spi, msg, rx); 614 if (ret) 615 return ret; 616 } 617 618 /* if we run without loopback, then return now */ 619 if (!loopback) 620 return 0; 621 622 /* if applicable to transfer check that rx_buf is equal to tx_buf */ 623 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 624 /* if there is no rx, then no check is needed */ 625 if (!xfer->len || !xfer->rx_buf) 626 continue; 627 /* so depending on tx_buf we need to handle things */ 628 if (xfer->tx_buf) { 629 for (i = 0; i < xfer->len; i++) { 630 txb = ((u8 *)xfer->tx_buf)[i]; 631 rxb = ((u8 *)xfer->rx_buf)[i]; 632 if (txb != rxb) 633 goto mismatch_error; 634 } 635 } else { 636 /* first byte received */ 637 txb = ((u8 *)xfer->rx_buf)[0]; 638 /* first byte may be 0 or xff */ 639 if (!((txb == 0) || (txb == 0xff))) { 640 dev_err(&spi->dev, 641 "loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n", 642 txb); 643 return -EINVAL; 644 } 645 /* check that all bytes are identical */ 646 for (i = 1; i < xfer->len; i++) { 647 rxb = ((u8 *)xfer->rx_buf)[i]; 648 if (rxb != txb) 649 goto mismatch_error; 650 } 651 } 652 } 653 654 return 0; 655 656 mismatch_error: 657 dev_err(&spi->dev, 658 "loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n", 659 i, txb, rxb); 660 661 return -EINVAL; 662 } 663 664 static int spi_test_translate(struct spi_device *spi, 665 void **ptr, size_t len, 666 void *tx, void *rx) 667 { 668 size_t off; 669 670 /* return on null */ 671 if (!*ptr) 672 return 0; 673 674 /* in the MAX_SIZE_HALF case modify the pointer */ 675 if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF) 676 /* move the pointer to the correct range */ 677 *ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) - 678 SPI_TEST_MAX_SIZE_HALF; 679 680 /* RX range 681 * - we check against MAX_SIZE_PLUS to allow for automated alignment 682 */ 683 if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) { 684 off = *ptr - RX(0); 685 *ptr = rx + off; 686 687 return 0; 688 } 689 690 /* TX range */ 691 if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) { 692 off = *ptr - TX(0); 693 *ptr = tx + off; 694 695 return 0; 696 } 697 698 dev_err(&spi->dev, 699 "PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n", 700 *ptr, *ptr + len, 701 RX(0), RX(SPI_TEST_MAX_SIZE), 702 TX(0), TX(SPI_TEST_MAX_SIZE)); 703 704 return -EINVAL; 705 } 706 707 static int spi_test_fill_pattern(struct spi_device *spi, 708 struct spi_test *test) 709 { 710 struct spi_transfer *xfers = test->transfers; 711 u8 *tx_buf; 712 size_t count = 0; 713 int i, j; 714 715 #ifdef __BIG_ENDIAN 716 #define GET_VALUE_BYTE(value, index, bytes) \ 717 (value >> (8 * (bytes - 1 - count % bytes))) 718 #else 719 #define GET_VALUE_BYTE(value, index, bytes) \ 720 (value >> (8 * (count % bytes))) 721 #endif 722 723 /* fill all transfers with the pattern requested */ 724 for (i = 0; i < test->transfer_count; i++) { 725 /* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */ 726 if (xfers[i].rx_buf) 727 memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN, 728 xfers[i].len); 729 /* if tx_buf is NULL then skip */ 730 tx_buf = (u8 *)xfers[i].tx_buf; 731 if (!tx_buf) 732 continue; 733 /* modify all the transfers */ 734 for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) { 735 /* fill tx */ 736 switch (test->fill_option) { 737 case FILL_MEMSET_8: 738 *tx_buf = test->fill_pattern; 739 break; 740 case FILL_MEMSET_16: 741 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 742 count, 2); 743 break; 744 case FILL_MEMSET_24: 745 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 746 count, 3); 747 break; 748 case FILL_MEMSET_32: 749 *tx_buf = GET_VALUE_BYTE(test->fill_pattern, 750 count, 4); 751 break; 752 case FILL_COUNT_8: 753 *tx_buf = count; 754 break; 755 case FILL_COUNT_16: 756 *tx_buf = GET_VALUE_BYTE(count, count, 2); 757 break; 758 case FILL_COUNT_24: 759 *tx_buf = GET_VALUE_BYTE(count, count, 3); 760 break; 761 case FILL_COUNT_32: 762 *tx_buf = GET_VALUE_BYTE(count, count, 4); 763 break; 764 case FILL_TRANSFER_BYTE_8: 765 *tx_buf = j; 766 break; 767 case FILL_TRANSFER_BYTE_16: 768 *tx_buf = GET_VALUE_BYTE(j, j, 2); 769 break; 770 case FILL_TRANSFER_BYTE_24: 771 *tx_buf = GET_VALUE_BYTE(j, j, 3); 772 break; 773 case FILL_TRANSFER_BYTE_32: 774 *tx_buf = GET_VALUE_BYTE(j, j, 4); 775 break; 776 case FILL_TRANSFER_NUM: 777 *tx_buf = i; 778 break; 779 default: 780 dev_err(&spi->dev, 781 "unsupported fill_option: %i\n", 782 test->fill_option); 783 return -EINVAL; 784 } 785 } 786 } 787 788 return 0; 789 } 790 791 static int _spi_test_run_iter(struct spi_device *spi, 792 struct spi_test *test, 793 void *tx, void *rx) 794 { 795 struct spi_message *msg = &test->msg; 796 struct spi_transfer *x; 797 int i, ret; 798 799 /* initialize message - zero-filled via static initialization */ 800 spi_message_init_no_memset(msg); 801 802 /* fill rx with the DO_NOT_WRITE pattern */ 803 memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS); 804 805 /* add the individual transfers */ 806 for (i = 0; i < test->transfer_count; i++) { 807 x = &test->transfers[i]; 808 809 /* patch the values of tx_buf */ 810 ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len, 811 (void *)tx, rx); 812 if (ret) 813 return ret; 814 815 /* patch the values of rx_buf */ 816 ret = spi_test_translate(spi, &x->rx_buf, x->len, 817 (void *)tx, rx); 818 if (ret) 819 return ret; 820 821 /* and add it to the list */ 822 spi_message_add_tail(x, msg); 823 } 824 825 /* fill in the transfer buffers with pattern */ 826 ret = spi_test_fill_pattern(spi, test); 827 if (ret) 828 return ret; 829 830 /* and execute */ 831 if (test->execute_msg) 832 ret = test->execute_msg(spi, test, tx, rx); 833 else 834 ret = spi_test_execute_msg(spi, test, tx, rx); 835 836 /* handle result */ 837 if (ret == test->expected_return) 838 return 0; 839 840 dev_err(&spi->dev, 841 "test failed - test returned %i, but we expect %i\n", 842 ret, test->expected_return); 843 844 if (ret) 845 return ret; 846 847 /* if it is 0, as we expected something else, 848 * then return something special 849 */ 850 return -EFAULT; 851 } 852 853 static int spi_test_run_iter(struct spi_device *spi, 854 const struct spi_test *testtemplate, 855 void *tx, void *rx, 856 size_t len, 857 size_t tx_off, 858 size_t rx_off 859 ) 860 { 861 struct spi_test test; 862 int i, tx_count, rx_count; 863 864 /* copy the test template to test */ 865 memcpy(&test, testtemplate, sizeof(test)); 866 867 /* if iterate_transfer_mask is not set, 868 * then set it to first transfer only 869 */ 870 if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1))) 871 test.iterate_transfer_mask = 1; 872 873 /* count number of transfers with tx/rx_buf != NULL */ 874 rx_count = tx_count = 0; 875 for (i = 0; i < test.transfer_count; i++) { 876 if (test.transfers[i].tx_buf) 877 tx_count++; 878 if (test.transfers[i].rx_buf) 879 rx_count++; 880 } 881 882 /* in some iteration cases warn and exit early, 883 * as there is nothing to do, that has not been tested already... 884 */ 885 if (tx_off && (!tx_count)) { 886 dev_warn_once(&spi->dev, 887 "%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n", 888 test.description); 889 return 0; 890 } 891 if (rx_off && (!rx_count)) { 892 dev_warn_once(&spi->dev, 893 "%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n", 894 test.description); 895 return 0; 896 } 897 898 /* write out info */ 899 if (!(len || tx_off || rx_off)) { 900 dev_info(&spi->dev, "Running test %s\n", test.description); 901 } else { 902 dev_info(&spi->dev, 903 " with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n", 904 len, tx_off, rx_off); 905 } 906 907 /* update in the values from iteration values */ 908 for (i = 0; i < test.transfer_count; i++) { 909 /* only when bit in transfer mask is set */ 910 if (!(test.iterate_transfer_mask & BIT(i))) 911 continue; 912 test.transfers[i].len = len; 913 if (test.transfers[i].tx_buf) 914 test.transfers[i].tx_buf += tx_off; 915 if (test.transfers[i].rx_buf) 916 test.transfers[i].rx_buf += rx_off; 917 } 918 919 /* and execute */ 920 return _spi_test_run_iter(spi, &test, tx, rx); 921 } 922 923 /** 924 * spi_test_execute_msg - default implementation to run a test 925 * 926 * @spi: @spi_device on which to run the @spi_message 927 * @test: the test to execute, which already contains @msg 928 * @tx: the tx buffer allocated for the test sequence 929 * @rx: the rx buffer allocated for the test sequence 930 * 931 * Returns: error code of spi_sync as well as basic error checking 932 */ 933 int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test, 934 void *tx, void *rx) 935 { 936 struct spi_message *msg = &test->msg; 937 int ret = 0; 938 int i; 939 940 /* only if we do not simulate */ 941 if (!simulate_only) { 942 ktime_t start; 943 944 /* dump the complete message before and after the transfer */ 945 if (dump_messages == 3) 946 spi_test_dump_message(spi, msg, true); 947 948 start = ktime_get(); 949 /* run spi message */ 950 ret = spi_sync(spi, msg); 951 test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start)); 952 if (ret == -ETIMEDOUT) { 953 dev_info(&spi->dev, 954 "spi-message timed out - rerunning...\n"); 955 /* rerun after a few explicit schedules */ 956 for (i = 0; i < 16; i++) 957 schedule(); 958 ret = spi_sync(spi, msg); 959 } 960 if (ret) { 961 dev_err(&spi->dev, 962 "Failed to execute spi_message: %i\n", 963 ret); 964 goto exit; 965 } 966 967 /* do some extra error checks */ 968 if (msg->frame_length != msg->actual_length) { 969 dev_err(&spi->dev, 970 "actual length differs from expected\n"); 971 ret = -EIO; 972 goto exit; 973 } 974 975 /* run rx-buffer tests */ 976 ret = spi_test_check_loopback_result(spi, msg, tx, rx); 977 if (ret) 978 goto exit; 979 980 ret = spi_test_check_elapsed_time(spi, test); 981 } 982 983 /* if requested or on error dump message (including data) */ 984 exit: 985 if (dump_messages || ret) 986 spi_test_dump_message(spi, msg, 987 (dump_messages >= 2) || (ret)); 988 989 return ret; 990 } 991 EXPORT_SYMBOL_GPL(spi_test_execute_msg); 992 993 /** 994 * spi_test_run_test - run an individual spi_test 995 * including all the relevant iterations on: 996 * length and buffer alignment 997 * 998 * @spi: the spi_device to send the messages to 999 * @test: the test which we need to execute 1000 * @tx: the tx buffer allocated for the test sequence 1001 * @rx: the rx buffer allocated for the test sequence 1002 * 1003 * Returns: status code of spi_sync or other failures 1004 */ 1005 1006 int spi_test_run_test(struct spi_device *spi, const struct spi_test *test, 1007 void *tx, void *rx) 1008 { 1009 int idx_len; 1010 size_t len; 1011 size_t tx_align, rx_align; 1012 int ret; 1013 1014 /* test for transfer limits */ 1015 if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) { 1016 dev_err(&spi->dev, 1017 "%s: Exceeded max number of transfers with %i\n", 1018 test->description, test->transfer_count); 1019 return -E2BIG; 1020 } 1021 1022 /* setting up some values in spi_message 1023 * based on some settings in spi_master 1024 * some of this can also get done in the run() method 1025 */ 1026 1027 /* iterate over all the iterable values using macros 1028 * (to make it a bit more readable... 1029 */ 1030 #define FOR_EACH_ALIGNMENT(var) \ 1031 for (var = 0; \ 1032 var < (test->iterate_##var ? \ 1033 (spi->controller->dma_alignment ? \ 1034 spi->controller->dma_alignment : \ 1035 test->iterate_##var) : \ 1036 1); \ 1037 var++) 1038 1039 for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE && 1040 (len = test->iterate_len[idx_len]) != -1; idx_len++) { 1041 if ((run_only_iter_len > -1) && len != run_only_iter_len) 1042 continue; 1043 FOR_EACH_ALIGNMENT(tx_align) { 1044 FOR_EACH_ALIGNMENT(rx_align) { 1045 /* and run the iteration */ 1046 ret = spi_test_run_iter(spi, test, 1047 tx, rx, 1048 len, 1049 tx_align, 1050 rx_align); 1051 if (ret) 1052 return ret; 1053 } 1054 } 1055 } 1056 1057 return 0; 1058 } 1059 EXPORT_SYMBOL_GPL(spi_test_run_test); 1060 1061 /** 1062 * spi_test_run_tests - run an array of spi_messages tests 1063 * @spi: the spi device on which to run the tests 1064 * @tests: NULL-terminated array of @spi_test 1065 * 1066 * Returns: status errors as per @spi_test_run_test() 1067 */ 1068 1069 int spi_test_run_tests(struct spi_device *spi, 1070 struct spi_test *tests) 1071 { 1072 char *rx = NULL, *tx = NULL; 1073 int ret = 0, count = 0; 1074 struct spi_test *test; 1075 1076 /* allocate rx/tx buffers of 128kB size without devm 1077 * in the hope that is on a page boundary 1078 */ 1079 if (use_vmalloc) 1080 rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1081 else 1082 rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1083 if (!rx) 1084 return -ENOMEM; 1085 1086 1087 if (use_vmalloc) 1088 tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS); 1089 else 1090 tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL); 1091 if (!tx) { 1092 ret = -ENOMEM; 1093 goto err_tx; 1094 } 1095 1096 /* now run the individual tests in the table */ 1097 for (test = tests, count = 0; test->description[0]; 1098 test++, count++) { 1099 /* only run test if requested */ 1100 if ((run_only_test > -1) && (count != run_only_test)) 1101 continue; 1102 /* run custom implementation */ 1103 if (test->run_test) 1104 ret = test->run_test(spi, test, tx, rx); 1105 else 1106 ret = spi_test_run_test(spi, test, tx, rx); 1107 if (ret) 1108 goto out; 1109 /* add some delays so that we can easily 1110 * detect the individual tests when using a logic analyzer 1111 * we also add scheduling to avoid potential spi_timeouts... 1112 */ 1113 if (delay_ms) 1114 mdelay(delay_ms); 1115 schedule(); 1116 } 1117 1118 out: 1119 kvfree(tx); 1120 err_tx: 1121 kvfree(rx); 1122 return ret; 1123 } 1124 EXPORT_SYMBOL_GPL(spi_test_run_tests); 1125