1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2007-2008 Pierre Ossman 4 */ 5 6 #include <linux/mmc/core.h> 7 #include <linux/mmc/card.h> 8 #include <linux/mmc/host.h> 9 #include <linux/mmc/mmc.h> 10 #include <linux/slab.h> 11 12 #include <linux/scatterlist.h> 13 #include <linux/list.h> 14 15 #include <linux/debugfs.h> 16 #include <linux/uaccess.h> 17 #include <linux/seq_file.h> 18 #include <linux/module.h> 19 20 #include "core.h" 21 #include "card.h" 22 #include "host.h" 23 #include "bus.h" 24 #include "mmc_ops.h" 25 26 #define RESULT_OK 0 27 #define RESULT_FAIL 1 28 #define RESULT_UNSUP_HOST 2 29 #define RESULT_UNSUP_CARD 3 30 31 #define BUFFER_ORDER 2 32 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 33 34 #define TEST_ALIGN_END 8 35 36 /* 37 * Limit the test area size to the maximum MMC HC erase group size. Note that 38 * the maximum SD allocation unit size is just 4MiB. 39 */ 40 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 41 42 /** 43 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. 44 * @page: first page in the allocation 45 * @order: order of the number of pages allocated 46 */ 47 struct mmc_test_pages { 48 struct page *page; 49 unsigned int order; 50 }; 51 52 /** 53 * struct mmc_test_mem - allocated memory. 54 * @arr: array of allocations 55 * @cnt: number of allocations 56 */ 57 struct mmc_test_mem { 58 struct mmc_test_pages *arr; 59 unsigned int cnt; 60 }; 61 62 /** 63 * struct mmc_test_area - information for performance tests. 64 * @max_sz: test area size (in bytes) 65 * @dev_addr: address on card at which to do performance tests 66 * @max_tfr: maximum transfer size allowed by driver (in bytes) 67 * @max_segs: maximum segments allowed by driver in scatterlist @sg 68 * @max_seg_sz: maximum segment size allowed by driver 69 * @blocks: number of (512 byte) blocks currently mapped by @sg 70 * @sg_len: length of currently mapped scatterlist @sg 71 * @mem: allocated memory 72 * @sg: scatterlist 73 * @sg_areq: scatterlist for non-blocking request 74 */ 75 struct mmc_test_area { 76 unsigned long max_sz; 77 unsigned int dev_addr; 78 unsigned int max_tfr; 79 unsigned int max_segs; 80 unsigned int max_seg_sz; 81 unsigned int blocks; 82 unsigned int sg_len; 83 struct mmc_test_mem *mem; 84 struct scatterlist *sg; 85 struct scatterlist *sg_areq; 86 }; 87 88 /** 89 * struct mmc_test_transfer_result - transfer results for performance tests. 90 * @link: double-linked list 91 * @count: amount of group of sectors to check 92 * @sectors: amount of sectors to check in one group 93 * @ts: time values of transfer 94 * @rate: calculated transfer rate 95 * @iops: I/O operations per second (times 100) 96 */ 97 struct mmc_test_transfer_result { 98 struct list_head link; 99 unsigned int count; 100 unsigned int sectors; 101 struct timespec64 ts; 102 unsigned int rate; 103 unsigned int iops; 104 }; 105 106 /** 107 * struct mmc_test_general_result - results for tests. 108 * @link: double-linked list 109 * @card: card under test 110 * @testcase: number of test case 111 * @result: result of test run 112 * @tr_lst: transfer measurements if any as mmc_test_transfer_result 113 */ 114 struct mmc_test_general_result { 115 struct list_head link; 116 struct mmc_card *card; 117 int testcase; 118 int result; 119 struct list_head tr_lst; 120 }; 121 122 /** 123 * struct mmc_test_dbgfs_file - debugfs related file. 124 * @link: double-linked list 125 * @card: card under test 126 * @file: file created under debugfs 127 */ 128 struct mmc_test_dbgfs_file { 129 struct list_head link; 130 struct mmc_card *card; 131 struct dentry *file; 132 }; 133 134 /** 135 * struct mmc_test_card - test information. 136 * @card: card under test 137 * @scratch: transfer buffer 138 * @buffer: transfer buffer 139 * @highmem: buffer for highmem tests 140 * @area: information for performance tests 141 * @gr: pointer to results of current testcase 142 */ 143 struct mmc_test_card { 144 struct mmc_card *card; 145 146 u8 scratch[BUFFER_SIZE]; 147 u8 *buffer; 148 #ifdef CONFIG_HIGHMEM 149 struct page *highmem; 150 #endif 151 struct mmc_test_area area; 152 struct mmc_test_general_result *gr; 153 }; 154 155 enum mmc_test_prep_media { 156 MMC_TEST_PREP_NONE = 0, 157 MMC_TEST_PREP_WRITE_FULL = 1 << 0, 158 MMC_TEST_PREP_ERASE = 1 << 1, 159 }; 160 161 struct mmc_test_multiple_rw { 162 unsigned int *sg_len; 163 unsigned int *bs; 164 unsigned int len; 165 unsigned int size; 166 bool do_write; 167 bool do_nonblock_req; 168 enum mmc_test_prep_media prepare; 169 }; 170 171 /*******************************************************************/ 172 /* General helper functions */ 173 /*******************************************************************/ 174 175 /* 176 * Configure correct block size in card 177 */ 178 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 179 { 180 return mmc_set_blocklen(test->card, size); 181 } 182 183 static void mmc_test_prepare_sbc(struct mmc_test_card *test, 184 struct mmc_request *mrq, unsigned int blocks) 185 { 186 struct mmc_card *card = test->card; 187 188 if (!mrq->sbc || !mmc_host_can_cmd23(card->host) || 189 !mmc_card_can_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || 190 mmc_card_blk_no_cmd23(card)) { 191 mrq->sbc = NULL; 192 return; 193 } 194 195 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; 196 mrq->sbc->arg = blocks; 197 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; 198 } 199 200 /* 201 * Fill in the mmc_request structure given a set of transfer parameters. 202 */ 203 static void mmc_test_prepare_mrq(struct mmc_test_card *test, 204 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 205 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 206 { 207 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) 208 return; 209 210 if (blocks > 1) { 211 mrq->cmd->opcode = write ? 212 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 213 } else { 214 mrq->cmd->opcode = write ? 215 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 216 } 217 218 mrq->cmd->arg = dev_addr; 219 if (!mmc_card_blockaddr(test->card)) 220 mrq->cmd->arg <<= 9; 221 222 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 223 224 if (blocks == 1) 225 mrq->stop = NULL; 226 else { 227 mrq->stop->opcode = MMC_STOP_TRANSMISSION; 228 mrq->stop->arg = 0; 229 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 230 } 231 232 mrq->data->blksz = blksz; 233 mrq->data->blocks = blocks; 234 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 235 mrq->data->sg = sg; 236 mrq->data->sg_len = sg_len; 237 238 mmc_test_prepare_sbc(test, mrq, blocks); 239 240 mmc_set_data_timeout(mrq->data, test->card); 241 } 242 243 static int mmc_test_busy(struct mmc_command *cmd) 244 { 245 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 246 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); 247 } 248 249 /* 250 * Wait for the card to finish the busy state 251 */ 252 static int mmc_test_wait_busy(struct mmc_test_card *test) 253 { 254 int ret, busy; 255 struct mmc_command cmd = {}; 256 257 busy = 0; 258 do { 259 memset(&cmd, 0, sizeof(struct mmc_command)); 260 261 cmd.opcode = MMC_SEND_STATUS; 262 cmd.arg = test->card->rca << 16; 263 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 264 265 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 266 if (ret) 267 break; 268 269 if (!busy && mmc_test_busy(&cmd)) { 270 busy = 1; 271 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 272 pr_info("%s: Warning: Host did not wait for busy state to end.\n", 273 mmc_hostname(test->card->host)); 274 } 275 } while (mmc_test_busy(&cmd)); 276 277 return ret; 278 } 279 280 /* 281 * Transfer a single sector of kernel addressable data 282 */ 283 static int mmc_test_buffer_transfer(struct mmc_test_card *test, 284 u8 *buffer, unsigned addr, unsigned blksz, int write) 285 { 286 struct mmc_request mrq = {}; 287 struct mmc_command cmd = {}; 288 struct mmc_command stop = {}; 289 struct mmc_data data = {}; 290 291 struct scatterlist sg; 292 293 mrq.cmd = &cmd; 294 mrq.data = &data; 295 mrq.stop = &stop; 296 297 sg_init_one(&sg, buffer, blksz); 298 299 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 300 301 mmc_wait_for_req(test->card->host, &mrq); 302 303 if (cmd.error) 304 return cmd.error; 305 if (data.error) 306 return data.error; 307 308 return mmc_test_wait_busy(test); 309 } 310 311 static void mmc_test_free_mem(struct mmc_test_mem *mem) 312 { 313 if (!mem) 314 return; 315 while (mem->cnt--) 316 __free_pages(mem->arr[mem->cnt].page, 317 mem->arr[mem->cnt].order); 318 kfree(mem->arr); 319 kfree(mem); 320 } 321 322 /* 323 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case 324 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 325 * not exceed a maximum number of segments and try not to make segments much 326 * bigger than maximum segment size. 327 */ 328 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 329 unsigned long max_sz, 330 unsigned int max_segs, 331 unsigned int max_seg_sz) 332 { 333 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 334 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 335 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); 336 unsigned long page_cnt = 0; 337 unsigned long limit = nr_free_buffer_pages() >> 4; 338 struct mmc_test_mem *mem; 339 340 if (max_page_cnt > limit) 341 max_page_cnt = limit; 342 if (min_page_cnt > max_page_cnt) 343 min_page_cnt = max_page_cnt; 344 345 if (max_seg_page_cnt > max_page_cnt) 346 max_seg_page_cnt = max_page_cnt; 347 348 if (max_segs > max_page_cnt) 349 max_segs = max_page_cnt; 350 351 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 352 if (!mem) 353 return NULL; 354 355 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); 356 if (!mem->arr) 357 goto out_free; 358 359 while (max_page_cnt) { 360 struct page *page; 361 unsigned int order; 362 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 363 __GFP_NORETRY; 364 365 order = get_order(max_seg_page_cnt << PAGE_SHIFT); 366 while (1) { 367 page = alloc_pages(flags, order); 368 if (page || !order) 369 break; 370 order -= 1; 371 } 372 if (!page) { 373 if (page_cnt < min_page_cnt) 374 goto out_free; 375 break; 376 } 377 mem->arr[mem->cnt].page = page; 378 mem->arr[mem->cnt].order = order; 379 mem->cnt += 1; 380 if (max_page_cnt <= (1UL << order)) 381 break; 382 max_page_cnt -= 1UL << order; 383 page_cnt += 1UL << order; 384 if (mem->cnt >= max_segs) { 385 if (page_cnt < min_page_cnt) 386 goto out_free; 387 break; 388 } 389 } 390 391 return mem; 392 393 out_free: 394 mmc_test_free_mem(mem); 395 return NULL; 396 } 397 398 /* 399 * Map memory into a scatterlist. Optionally allow the same memory to be 400 * mapped more than once. 401 */ 402 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, 403 struct scatterlist *sglist, int repeat, 404 unsigned int max_segs, unsigned int max_seg_sz, 405 unsigned int *sg_len, int min_sg_len) 406 { 407 struct scatterlist *sg = NULL; 408 unsigned int i; 409 unsigned long sz = size; 410 411 sg_init_table(sglist, max_segs); 412 if (min_sg_len > max_segs) 413 min_sg_len = max_segs; 414 415 *sg_len = 0; 416 do { 417 for (i = 0; i < mem->cnt; i++) { 418 unsigned long len = PAGE_SIZE << mem->arr[i].order; 419 420 if (min_sg_len && (size / min_sg_len < len)) 421 len = ALIGN(size / min_sg_len, 512); 422 if (len > sz) 423 len = sz; 424 if (len > max_seg_sz) 425 len = max_seg_sz; 426 if (sg) 427 sg = sg_next(sg); 428 else 429 sg = sglist; 430 if (!sg) 431 return -EINVAL; 432 sg_set_page(sg, mem->arr[i].page, len, 0); 433 sz -= len; 434 *sg_len += 1; 435 if (!sz) 436 break; 437 } 438 } while (sz && repeat); 439 440 if (sz) 441 return -EINVAL; 442 443 if (sg) 444 sg_mark_end(sg); 445 446 return 0; 447 } 448 449 /* 450 * Map memory into a scatterlist so that no pages are contiguous. Allow the 451 * same memory to be mapped more than once. 452 */ 453 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 454 unsigned long sz, 455 struct scatterlist *sglist, 456 unsigned int max_segs, 457 unsigned int max_seg_sz, 458 unsigned int *sg_len) 459 { 460 struct scatterlist *sg = NULL; 461 unsigned int i = mem->cnt, cnt; 462 unsigned long len; 463 void *base, *addr, *last_addr = NULL; 464 465 sg_init_table(sglist, max_segs); 466 467 *sg_len = 0; 468 while (sz) { 469 base = page_address(mem->arr[--i].page); 470 cnt = 1 << mem->arr[i].order; 471 while (sz && cnt) { 472 addr = base + PAGE_SIZE * --cnt; 473 if (last_addr && last_addr + PAGE_SIZE == addr) 474 continue; 475 last_addr = addr; 476 len = PAGE_SIZE; 477 if (len > max_seg_sz) 478 len = max_seg_sz; 479 if (len > sz) 480 len = sz; 481 if (sg) 482 sg = sg_next(sg); 483 else 484 sg = sglist; 485 if (!sg) 486 return -EINVAL; 487 sg_set_page(sg, virt_to_page(addr), len, 0); 488 sz -= len; 489 *sg_len += 1; 490 } 491 if (i == 0) 492 i = mem->cnt; 493 } 494 495 if (sg) 496 sg_mark_end(sg); 497 498 return 0; 499 } 500 501 /* 502 * Calculate transfer rate in bytes per second. 503 */ 504 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) 505 { 506 uint64_t ns; 507 508 ns = timespec64_to_ns(ts); 509 bytes *= 1000000000; 510 511 while (ns > UINT_MAX) { 512 bytes >>= 1; 513 ns >>= 1; 514 } 515 516 if (!ns) 517 return 0; 518 519 do_div(bytes, (uint32_t)ns); 520 521 return bytes; 522 } 523 524 /* 525 * Save transfer results for future usage 526 */ 527 static void mmc_test_save_transfer_result(struct mmc_test_card *test, 528 unsigned int count, unsigned int sectors, struct timespec64 ts, 529 unsigned int rate, unsigned int iops) 530 { 531 struct mmc_test_transfer_result *tr; 532 533 if (!test->gr) 534 return; 535 536 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 537 if (!tr) 538 return; 539 540 tr->count = count; 541 tr->sectors = sectors; 542 tr->ts = ts; 543 tr->rate = rate; 544 tr->iops = iops; 545 546 list_add_tail(&tr->link, &test->gr->tr_lst); 547 } 548 549 /* 550 * Print the transfer rate. 551 */ 552 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 553 struct timespec64 *ts1, struct timespec64 *ts2) 554 { 555 unsigned int rate, iops, sectors = bytes >> 9; 556 struct timespec64 ts; 557 558 ts = timespec64_sub(*ts2, *ts1); 559 560 rate = mmc_test_rate(bytes, &ts); 561 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 562 563 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u " 564 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 565 mmc_hostname(test->card->host), sectors, sectors >> 1, 566 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, 567 (u32)ts.tv_nsec, rate / 1000, rate / 1024, 568 iops / 100, iops % 100); 569 570 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); 571 } 572 573 /* 574 * Print the average transfer rate. 575 */ 576 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, 577 unsigned int count, struct timespec64 *ts1, 578 struct timespec64 *ts2) 579 { 580 unsigned int rate, iops, sectors = bytes >> 9; 581 uint64_t tot = bytes * count; 582 struct timespec64 ts; 583 584 ts = timespec64_sub(*ts2, *ts1); 585 586 rate = mmc_test_rate(tot, &ts); 587 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 588 589 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 590 "%llu.%09u seconds (%u kB/s, %u KiB/s, " 591 "%u.%02u IOPS, sg_len %d)\n", 592 mmc_hostname(test->card->host), count, sectors, count, 593 sectors >> 1, (sectors & 1 ? ".5" : ""), 594 (u64)ts.tv_sec, (u32)ts.tv_nsec, 595 rate / 1000, rate / 1024, iops / 100, iops % 100, 596 test->area.sg_len); 597 598 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 599 } 600 601 /* 602 * Return the card size in sectors. 603 */ 604 static unsigned int mmc_test_capacity(struct mmc_card *card) 605 { 606 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) 607 return card->ext_csd.sectors; 608 else 609 return card->csd.capacity << (card->csd.read_blkbits - 9); 610 } 611 612 /*******************************************************************/ 613 /* Test preparation and cleanup */ 614 /*******************************************************************/ 615 616 /* 617 * Fill the first couple of sectors of the card with known data 618 * so that bad reads/writes can be detected 619 */ 620 static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val) 621 { 622 int ret, i; 623 624 ret = mmc_test_set_blksize(test, 512); 625 if (ret) 626 return ret; 627 628 if (write) 629 memset(test->buffer, val, 512); 630 else { 631 for (i = 0; i < 512; i++) 632 test->buffer[i] = i; 633 } 634 635 for (i = 0; i < BUFFER_SIZE / 512; i++) { 636 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 637 if (ret) 638 return ret; 639 } 640 641 return 0; 642 } 643 644 static int mmc_test_prepare_write(struct mmc_test_card *test) 645 { 646 return __mmc_test_prepare(test, 1, 0xDF); 647 } 648 649 static int mmc_test_prepare_read(struct mmc_test_card *test) 650 { 651 return __mmc_test_prepare(test, 0, 0); 652 } 653 654 static int mmc_test_cleanup(struct mmc_test_card *test) 655 { 656 return __mmc_test_prepare(test, 1, 0); 657 } 658 659 /*******************************************************************/ 660 /* Test execution helpers */ 661 /*******************************************************************/ 662 663 /* 664 * Modifies the mmc_request to perform the "short transfer" tests 665 */ 666 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 667 struct mmc_request *mrq, int write) 668 { 669 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 670 return; 671 672 if (mrq->data->blocks > 1) { 673 mrq->cmd->opcode = write ? 674 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 675 mrq->stop = NULL; 676 } else { 677 mrq->cmd->opcode = MMC_SEND_STATUS; 678 mrq->cmd->arg = test->card->rca << 16; 679 } 680 } 681 682 /* 683 * Checks that a normal transfer didn't have any errors 684 */ 685 static int mmc_test_check_result(struct mmc_test_card *test, 686 struct mmc_request *mrq) 687 { 688 int ret; 689 690 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 691 return -EINVAL; 692 693 ret = 0; 694 695 if (mrq->sbc && mrq->sbc->error) 696 ret = mrq->sbc->error; 697 if (!ret && mrq->cmd->error) 698 ret = mrq->cmd->error; 699 if (!ret && mrq->data->error) 700 ret = mrq->data->error; 701 if (!ret && mrq->stop && mrq->stop->error) 702 ret = mrq->stop->error; 703 if (!ret && mrq->data->bytes_xfered != 704 mrq->data->blocks * mrq->data->blksz) 705 ret = RESULT_FAIL; 706 707 if (ret == -EINVAL) 708 ret = RESULT_UNSUP_HOST; 709 710 return ret; 711 } 712 713 /* 714 * Checks that a "short transfer" behaved as expected 715 */ 716 static int mmc_test_check_broken_result(struct mmc_test_card *test, 717 struct mmc_request *mrq) 718 { 719 int ret; 720 721 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 722 return -EINVAL; 723 724 ret = 0; 725 726 if (!ret && mrq->cmd->error) 727 ret = mrq->cmd->error; 728 if (!ret && mrq->data->error == 0) 729 ret = RESULT_FAIL; 730 if (!ret && mrq->data->error != -ETIMEDOUT) 731 ret = mrq->data->error; 732 if (!ret && mrq->stop && mrq->stop->error) 733 ret = mrq->stop->error; 734 if (mrq->data->blocks > 1) { 735 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 736 ret = RESULT_FAIL; 737 } else { 738 if (!ret && mrq->data->bytes_xfered > 0) 739 ret = RESULT_FAIL; 740 } 741 742 if (ret == -EINVAL) 743 ret = RESULT_UNSUP_HOST; 744 745 return ret; 746 } 747 748 struct mmc_test_req { 749 struct mmc_request mrq; 750 struct mmc_command sbc; 751 struct mmc_command cmd; 752 struct mmc_command stop; 753 struct mmc_command status; 754 struct mmc_data data; 755 }; 756 757 /* 758 * Tests nonblock transfer with certain parameters 759 */ 760 static void mmc_test_req_reset(struct mmc_test_req *rq) 761 { 762 memset(rq, 0, sizeof(struct mmc_test_req)); 763 764 rq->mrq.cmd = &rq->cmd; 765 rq->mrq.data = &rq->data; 766 rq->mrq.stop = &rq->stop; 767 } 768 769 static struct mmc_test_req *mmc_test_req_alloc(void) 770 { 771 struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); 772 773 if (rq) 774 mmc_test_req_reset(rq); 775 776 return rq; 777 } 778 779 static void mmc_test_wait_done(struct mmc_request *mrq) 780 { 781 complete(&mrq->completion); 782 } 783 784 static int mmc_test_start_areq(struct mmc_test_card *test, 785 struct mmc_request *mrq, 786 struct mmc_request *prev_mrq) 787 { 788 struct mmc_host *host = test->card->host; 789 int err = 0; 790 791 if (mrq) { 792 init_completion(&mrq->completion); 793 mrq->done = mmc_test_wait_done; 794 mmc_pre_req(host, mrq); 795 } 796 797 if (prev_mrq) { 798 wait_for_completion(&prev_mrq->completion); 799 err = mmc_test_wait_busy(test); 800 if (!err) 801 err = mmc_test_check_result(test, prev_mrq); 802 } 803 804 if (!err && mrq) { 805 err = mmc_start_request(host, mrq); 806 if (err) 807 mmc_retune_release(host); 808 } 809 810 if (prev_mrq) 811 mmc_post_req(host, prev_mrq, 0); 812 813 if (err && mrq) 814 mmc_post_req(host, mrq, err); 815 816 return err; 817 } 818 819 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 820 unsigned int dev_addr, int write, 821 int count) 822 { 823 struct mmc_test_req *rq1, *rq2; 824 struct mmc_request *mrq, *prev_mrq; 825 int i; 826 int ret = RESULT_OK; 827 struct mmc_test_area *t = &test->area; 828 struct scatterlist *sg = t->sg; 829 struct scatterlist *sg_areq = t->sg_areq; 830 831 rq1 = mmc_test_req_alloc(); 832 rq2 = mmc_test_req_alloc(); 833 if (!rq1 || !rq2) { 834 ret = RESULT_FAIL; 835 goto err; 836 } 837 838 mrq = &rq1->mrq; 839 prev_mrq = NULL; 840 841 for (i = 0; i < count; i++) { 842 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); 843 mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr, 844 t->blocks, 512, write); 845 ret = mmc_test_start_areq(test, mrq, prev_mrq); 846 if (ret) 847 goto err; 848 849 if (!prev_mrq) 850 prev_mrq = &rq2->mrq; 851 852 swap(mrq, prev_mrq); 853 swap(sg, sg_areq); 854 dev_addr += t->blocks; 855 } 856 857 ret = mmc_test_start_areq(test, NULL, prev_mrq); 858 err: 859 kfree(rq1); 860 kfree(rq2); 861 return ret; 862 } 863 864 /* 865 * Tests a basic transfer with certain parameters 866 */ 867 static int mmc_test_simple_transfer(struct mmc_test_card *test, 868 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 869 unsigned blocks, unsigned blksz, int write) 870 { 871 struct mmc_request mrq = {}; 872 struct mmc_command cmd = {}; 873 struct mmc_command stop = {}; 874 struct mmc_data data = {}; 875 876 mrq.cmd = &cmd; 877 mrq.data = &data; 878 mrq.stop = &stop; 879 880 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 881 blocks, blksz, write); 882 883 mmc_wait_for_req(test->card->host, &mrq); 884 885 mmc_test_wait_busy(test); 886 887 return mmc_test_check_result(test, &mrq); 888 } 889 890 /* 891 * Tests a transfer where the card will fail completely or partly 892 */ 893 static int mmc_test_broken_transfer(struct mmc_test_card *test, 894 unsigned blocks, unsigned blksz, int write) 895 { 896 struct mmc_request mrq = {}; 897 struct mmc_command cmd = {}; 898 struct mmc_command stop = {}; 899 struct mmc_data data = {}; 900 901 struct scatterlist sg; 902 903 mrq.cmd = &cmd; 904 mrq.data = &data; 905 mrq.stop = &stop; 906 907 sg_init_one(&sg, test->buffer, blocks * blksz); 908 909 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 910 mmc_test_prepare_broken_mrq(test, &mrq, write); 911 912 mmc_wait_for_req(test->card->host, &mrq); 913 914 mmc_test_wait_busy(test); 915 916 return mmc_test_check_broken_result(test, &mrq); 917 } 918 919 /* 920 * Does a complete transfer test where data is also validated 921 * 922 * Note: mmc_test_prepare() must have been done before this call 923 */ 924 static int mmc_test_transfer(struct mmc_test_card *test, 925 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 926 unsigned blocks, unsigned blksz, int write) 927 { 928 int ret, i; 929 930 if (write) { 931 for (i = 0; i < blocks * blksz; i++) 932 test->scratch[i] = i; 933 } else { 934 memset(test->scratch, 0, BUFFER_SIZE); 935 } 936 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 937 938 ret = mmc_test_set_blksize(test, blksz); 939 if (ret) 940 return ret; 941 942 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 943 blocks, blksz, write); 944 if (ret) 945 return ret; 946 947 if (write) { 948 int sectors; 949 950 ret = mmc_test_set_blksize(test, 512); 951 if (ret) 952 return ret; 953 954 sectors = (blocks * blksz + 511) / 512; 955 if ((sectors * 512) == (blocks * blksz)) 956 sectors++; 957 958 if ((sectors * 512) > BUFFER_SIZE) 959 return -EINVAL; 960 961 memset(test->buffer, 0, sectors * 512); 962 963 for (i = 0; i < sectors; i++) { 964 ret = mmc_test_buffer_transfer(test, 965 test->buffer + i * 512, 966 dev_addr + i, 512, 0); 967 if (ret) 968 return ret; 969 } 970 971 for (i = 0; i < blocks * blksz; i++) { 972 if (test->buffer[i] != (u8)i) 973 return RESULT_FAIL; 974 } 975 976 for (; i < sectors * 512; i++) { 977 if (test->buffer[i] != 0xDF) 978 return RESULT_FAIL; 979 } 980 } else { 981 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 982 for (i = 0; i < blocks * blksz; i++) { 983 if (test->scratch[i] != (u8)i) 984 return RESULT_FAIL; 985 } 986 } 987 988 return 0; 989 } 990 991 /*******************************************************************/ 992 /* Tests */ 993 /*******************************************************************/ 994 995 struct mmc_test_case { 996 const char *name; 997 998 int (*prepare)(struct mmc_test_card *); 999 int (*run)(struct mmc_test_card *); 1000 int (*cleanup)(struct mmc_test_card *); 1001 }; 1002 1003 static int mmc_test_basic_write(struct mmc_test_card *test) 1004 { 1005 int ret; 1006 struct scatterlist sg; 1007 1008 ret = mmc_test_set_blksize(test, 512); 1009 if (ret) 1010 return ret; 1011 1012 sg_init_one(&sg, test->buffer, 512); 1013 1014 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 1015 } 1016 1017 static int mmc_test_basic_read(struct mmc_test_card *test) 1018 { 1019 int ret; 1020 struct scatterlist sg; 1021 1022 ret = mmc_test_set_blksize(test, 512); 1023 if (ret) 1024 return ret; 1025 1026 sg_init_one(&sg, test->buffer, 512); 1027 1028 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); 1029 } 1030 1031 static int mmc_test_verify_write(struct mmc_test_card *test) 1032 { 1033 struct scatterlist sg; 1034 1035 sg_init_one(&sg, test->buffer, 512); 1036 1037 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1038 } 1039 1040 static int mmc_test_verify_read(struct mmc_test_card *test) 1041 { 1042 struct scatterlist sg; 1043 1044 sg_init_one(&sg, test->buffer, 512); 1045 1046 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1047 } 1048 1049 static int mmc_test_multi_write(struct mmc_test_card *test) 1050 { 1051 unsigned int size; 1052 struct scatterlist sg; 1053 1054 if (test->card->host->max_blk_count == 1) 1055 return RESULT_UNSUP_HOST; 1056 1057 size = PAGE_SIZE * 2; 1058 size = min(size, test->card->host->max_req_size); 1059 size = min(size, test->card->host->max_seg_size); 1060 size = min(size, test->card->host->max_blk_count * 512); 1061 1062 if (size < 1024) 1063 return RESULT_UNSUP_HOST; 1064 1065 sg_init_one(&sg, test->buffer, size); 1066 1067 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1068 } 1069 1070 static int mmc_test_multi_read(struct mmc_test_card *test) 1071 { 1072 unsigned int size; 1073 struct scatterlist sg; 1074 1075 if (test->card->host->max_blk_count == 1) 1076 return RESULT_UNSUP_HOST; 1077 1078 size = PAGE_SIZE * 2; 1079 size = min(size, test->card->host->max_req_size); 1080 size = min(size, test->card->host->max_seg_size); 1081 size = min(size, test->card->host->max_blk_count * 512); 1082 1083 if (size < 1024) 1084 return RESULT_UNSUP_HOST; 1085 1086 sg_init_one(&sg, test->buffer, size); 1087 1088 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1089 } 1090 1091 static int mmc_test_pow2_write(struct mmc_test_card *test) 1092 { 1093 int ret, i; 1094 struct scatterlist sg; 1095 1096 if (!test->card->csd.write_partial) 1097 return RESULT_UNSUP_CARD; 1098 1099 for (i = 1; i < 512; i <<= 1) { 1100 sg_init_one(&sg, test->buffer, i); 1101 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1102 if (ret) 1103 return ret; 1104 } 1105 1106 return 0; 1107 } 1108 1109 static int mmc_test_pow2_read(struct mmc_test_card *test) 1110 { 1111 int ret, i; 1112 struct scatterlist sg; 1113 1114 if (!test->card->csd.read_partial) 1115 return RESULT_UNSUP_CARD; 1116 1117 for (i = 1; i < 512; i <<= 1) { 1118 sg_init_one(&sg, test->buffer, i); 1119 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1120 if (ret) 1121 return ret; 1122 } 1123 1124 return 0; 1125 } 1126 1127 static int mmc_test_weird_write(struct mmc_test_card *test) 1128 { 1129 int ret, i; 1130 struct scatterlist sg; 1131 1132 if (!test->card->csd.write_partial) 1133 return RESULT_UNSUP_CARD; 1134 1135 for (i = 3; i < 512; i += 7) { 1136 sg_init_one(&sg, test->buffer, i); 1137 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1138 if (ret) 1139 return ret; 1140 } 1141 1142 return 0; 1143 } 1144 1145 static int mmc_test_weird_read(struct mmc_test_card *test) 1146 { 1147 int ret, i; 1148 struct scatterlist sg; 1149 1150 if (!test->card->csd.read_partial) 1151 return RESULT_UNSUP_CARD; 1152 1153 for (i = 3; i < 512; i += 7) { 1154 sg_init_one(&sg, test->buffer, i); 1155 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1156 if (ret) 1157 return ret; 1158 } 1159 1160 return 0; 1161 } 1162 1163 static int mmc_test_align_write(struct mmc_test_card *test) 1164 { 1165 int ret, i; 1166 struct scatterlist sg; 1167 1168 for (i = 1; i < TEST_ALIGN_END; i++) { 1169 sg_init_one(&sg, test->buffer + i, 512); 1170 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1171 if (ret) 1172 return ret; 1173 } 1174 1175 return 0; 1176 } 1177 1178 static int mmc_test_align_read(struct mmc_test_card *test) 1179 { 1180 int ret, i; 1181 struct scatterlist sg; 1182 1183 for (i = 1; i < TEST_ALIGN_END; i++) { 1184 sg_init_one(&sg, test->buffer + i, 512); 1185 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1186 if (ret) 1187 return ret; 1188 } 1189 1190 return 0; 1191 } 1192 1193 static int mmc_test_align_multi_write(struct mmc_test_card *test) 1194 { 1195 int ret, i; 1196 unsigned int size; 1197 struct scatterlist sg; 1198 1199 if (test->card->host->max_blk_count == 1) 1200 return RESULT_UNSUP_HOST; 1201 1202 size = PAGE_SIZE * 2; 1203 size = min(size, test->card->host->max_req_size); 1204 size = min(size, test->card->host->max_seg_size); 1205 size = min(size, test->card->host->max_blk_count * 512); 1206 1207 if (size < 1024) 1208 return RESULT_UNSUP_HOST; 1209 1210 for (i = 1; i < TEST_ALIGN_END; i++) { 1211 sg_init_one(&sg, test->buffer + i, size); 1212 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1213 if (ret) 1214 return ret; 1215 } 1216 1217 return 0; 1218 } 1219 1220 static int mmc_test_align_multi_read(struct mmc_test_card *test) 1221 { 1222 int ret, i; 1223 unsigned int size; 1224 struct scatterlist sg; 1225 1226 if (test->card->host->max_blk_count == 1) 1227 return RESULT_UNSUP_HOST; 1228 1229 size = PAGE_SIZE * 2; 1230 size = min(size, test->card->host->max_req_size); 1231 size = min(size, test->card->host->max_seg_size); 1232 size = min(size, test->card->host->max_blk_count * 512); 1233 1234 if (size < 1024) 1235 return RESULT_UNSUP_HOST; 1236 1237 for (i = 1; i < TEST_ALIGN_END; i++) { 1238 sg_init_one(&sg, test->buffer + i, size); 1239 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1240 if (ret) 1241 return ret; 1242 } 1243 1244 return 0; 1245 } 1246 1247 static int mmc_test_xfersize_write(struct mmc_test_card *test) 1248 { 1249 int ret; 1250 1251 ret = mmc_test_set_blksize(test, 512); 1252 if (ret) 1253 return ret; 1254 1255 return mmc_test_broken_transfer(test, 1, 512, 1); 1256 } 1257 1258 static int mmc_test_xfersize_read(struct mmc_test_card *test) 1259 { 1260 int ret; 1261 1262 ret = mmc_test_set_blksize(test, 512); 1263 if (ret) 1264 return ret; 1265 1266 return mmc_test_broken_transfer(test, 1, 512, 0); 1267 } 1268 1269 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) 1270 { 1271 int ret; 1272 1273 if (test->card->host->max_blk_count == 1) 1274 return RESULT_UNSUP_HOST; 1275 1276 ret = mmc_test_set_blksize(test, 512); 1277 if (ret) 1278 return ret; 1279 1280 return mmc_test_broken_transfer(test, 2, 512, 1); 1281 } 1282 1283 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) 1284 { 1285 int ret; 1286 1287 if (test->card->host->max_blk_count == 1) 1288 return RESULT_UNSUP_HOST; 1289 1290 ret = mmc_test_set_blksize(test, 512); 1291 if (ret) 1292 return ret; 1293 1294 return mmc_test_broken_transfer(test, 2, 512, 0); 1295 } 1296 1297 #ifdef CONFIG_HIGHMEM 1298 1299 static int mmc_test_write_high(struct mmc_test_card *test) 1300 { 1301 struct scatterlist sg; 1302 1303 sg_init_table(&sg, 1); 1304 sg_set_page(&sg, test->highmem, 512, 0); 1305 1306 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1307 } 1308 1309 static int mmc_test_read_high(struct mmc_test_card *test) 1310 { 1311 struct scatterlist sg; 1312 1313 sg_init_table(&sg, 1); 1314 sg_set_page(&sg, test->highmem, 512, 0); 1315 1316 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1317 } 1318 1319 static int mmc_test_multi_write_high(struct mmc_test_card *test) 1320 { 1321 unsigned int size; 1322 struct scatterlist sg; 1323 1324 if (test->card->host->max_blk_count == 1) 1325 return RESULT_UNSUP_HOST; 1326 1327 size = PAGE_SIZE * 2; 1328 size = min(size, test->card->host->max_req_size); 1329 size = min(size, test->card->host->max_seg_size); 1330 size = min(size, test->card->host->max_blk_count * 512); 1331 1332 if (size < 1024) 1333 return RESULT_UNSUP_HOST; 1334 1335 sg_init_table(&sg, 1); 1336 sg_set_page(&sg, test->highmem, size, 0); 1337 1338 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1339 } 1340 1341 static int mmc_test_multi_read_high(struct mmc_test_card *test) 1342 { 1343 unsigned int size; 1344 struct scatterlist sg; 1345 1346 if (test->card->host->max_blk_count == 1) 1347 return RESULT_UNSUP_HOST; 1348 1349 size = PAGE_SIZE * 2; 1350 size = min(size, test->card->host->max_req_size); 1351 size = min(size, test->card->host->max_seg_size); 1352 size = min(size, test->card->host->max_blk_count * 512); 1353 1354 if (size < 1024) 1355 return RESULT_UNSUP_HOST; 1356 1357 sg_init_table(&sg, 1); 1358 sg_set_page(&sg, test->highmem, size, 0); 1359 1360 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1361 } 1362 1363 #else 1364 1365 static int mmc_test_no_highmem(struct mmc_test_card *test) 1366 { 1367 pr_info("%s: Highmem not configured - test skipped\n", 1368 mmc_hostname(test->card->host)); 1369 return 0; 1370 } 1371 1372 #endif /* CONFIG_HIGHMEM */ 1373 1374 /* 1375 * Map sz bytes so that it can be transferred. 1376 */ 1377 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1378 int max_scatter, int min_sg_len, bool nonblock) 1379 { 1380 struct mmc_test_area *t = &test->area; 1381 int err; 1382 unsigned int sg_len = 0; 1383 1384 t->blocks = sz >> 9; 1385 1386 if (max_scatter) { 1387 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1388 t->max_segs, t->max_seg_sz, 1389 &t->sg_len); 1390 } else { 1391 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1392 t->max_seg_sz, &t->sg_len, min_sg_len); 1393 } 1394 1395 if (err || !nonblock) 1396 goto err; 1397 1398 if (max_scatter) { 1399 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq, 1400 t->max_segs, t->max_seg_sz, 1401 &sg_len); 1402 } else { 1403 err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs, 1404 t->max_seg_sz, &sg_len, min_sg_len); 1405 } 1406 if (!err && sg_len != t->sg_len) 1407 err = -EINVAL; 1408 1409 err: 1410 if (err) 1411 pr_info("%s: Failed to map sg list\n", 1412 mmc_hostname(test->card->host)); 1413 return err; 1414 } 1415 1416 /* 1417 * Transfer bytes mapped by mmc_test_area_map(). 1418 */ 1419 static int mmc_test_area_transfer(struct mmc_test_card *test, 1420 unsigned int dev_addr, int write) 1421 { 1422 struct mmc_test_area *t = &test->area; 1423 1424 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, 1425 t->blocks, 512, write); 1426 } 1427 1428 /* 1429 * Map and transfer bytes for multiple transfers. 1430 */ 1431 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, 1432 unsigned int dev_addr, int write, 1433 int max_scatter, int timed, int count, 1434 bool nonblock, int min_sg_len) 1435 { 1436 struct timespec64 ts1, ts2; 1437 int ret = 0; 1438 int i; 1439 1440 /* 1441 * In the case of a maximally scattered transfer, the maximum transfer 1442 * size is further limited by using PAGE_SIZE segments. 1443 */ 1444 if (max_scatter) { 1445 struct mmc_test_area *t = &test->area; 1446 unsigned long max_tfr; 1447 1448 if (t->max_seg_sz >= PAGE_SIZE) 1449 max_tfr = t->max_segs * PAGE_SIZE; 1450 else 1451 max_tfr = t->max_segs * t->max_seg_sz; 1452 if (sz > max_tfr) 1453 sz = max_tfr; 1454 } 1455 1456 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock); 1457 if (ret) 1458 return ret; 1459 1460 if (timed) 1461 ktime_get_ts64(&ts1); 1462 if (nonblock) 1463 ret = mmc_test_nonblock_transfer(test, dev_addr, write, count); 1464 else 1465 for (i = 0; i < count && ret == 0; i++) { 1466 ret = mmc_test_area_transfer(test, dev_addr, write); 1467 dev_addr += sz >> 9; 1468 } 1469 1470 if (ret) 1471 return ret; 1472 1473 if (timed) 1474 ktime_get_ts64(&ts2); 1475 1476 if (timed) 1477 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); 1478 1479 return 0; 1480 } 1481 1482 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1483 unsigned int dev_addr, int write, int max_scatter, 1484 int timed) 1485 { 1486 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, 1487 timed, 1, false, 0); 1488 } 1489 1490 /* 1491 * Write the test area entirely. 1492 */ 1493 static int mmc_test_area_fill(struct mmc_test_card *test) 1494 { 1495 struct mmc_test_area *t = &test->area; 1496 1497 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); 1498 } 1499 1500 /* 1501 * Erase the test area entirely. 1502 */ 1503 static int mmc_test_area_erase(struct mmc_test_card *test) 1504 { 1505 struct mmc_test_area *t = &test->area; 1506 1507 if (!mmc_card_can_erase(test->card)) 1508 return 0; 1509 1510 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, 1511 MMC_ERASE_ARG); 1512 } 1513 1514 /* 1515 * Cleanup struct mmc_test_area. 1516 */ 1517 static int mmc_test_area_cleanup(struct mmc_test_card *test) 1518 { 1519 struct mmc_test_area *t = &test->area; 1520 1521 kfree(t->sg); 1522 kfree(t->sg_areq); 1523 mmc_test_free_mem(t->mem); 1524 1525 return 0; 1526 } 1527 1528 /* 1529 * Initialize an area for testing large transfers. The test area is set to the 1530 * middle of the card because cards may have different characteristics at the 1531 * front (for FAT file system optimization). Optionally, the area is erased 1532 * (if the card supports it) which may improve write performance. Optionally, 1533 * the area is filled with data for subsequent read tests. 1534 */ 1535 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1536 { 1537 struct mmc_test_area *t = &test->area; 1538 unsigned long min_sz = 64 * 1024, sz; 1539 int ret; 1540 1541 ret = mmc_test_set_blksize(test, 512); 1542 if (ret) 1543 return ret; 1544 1545 /* Make the test area size about 4MiB */ 1546 sz = (unsigned long)test->card->pref_erase << 9; 1547 t->max_sz = sz; 1548 while (t->max_sz < 4 * 1024 * 1024) 1549 t->max_sz += sz; 1550 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) 1551 t->max_sz -= sz; 1552 1553 t->max_segs = test->card->host->max_segs; 1554 t->max_seg_sz = test->card->host->max_seg_size; 1555 t->max_seg_sz -= t->max_seg_sz % 512; 1556 1557 t->max_tfr = t->max_sz; 1558 if (t->max_tfr >> 9 > test->card->host->max_blk_count) 1559 t->max_tfr = test->card->host->max_blk_count << 9; 1560 if (t->max_tfr > test->card->host->max_req_size) 1561 t->max_tfr = test->card->host->max_req_size; 1562 if (t->max_tfr / t->max_seg_sz > t->max_segs) 1563 t->max_tfr = t->max_segs * t->max_seg_sz; 1564 1565 /* 1566 * Try to allocate enough memory for a max. sized transfer. Less is OK 1567 * because the same memory can be mapped into the scatterlist more than 1568 * once. Also, take into account the limits imposed on scatterlist 1569 * segments by the host driver. 1570 */ 1571 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, 1572 t->max_seg_sz); 1573 if (!t->mem) 1574 return -ENOMEM; 1575 1576 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1577 if (!t->sg) { 1578 ret = -ENOMEM; 1579 goto out_free; 1580 } 1581 1582 t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq), 1583 GFP_KERNEL); 1584 if (!t->sg_areq) { 1585 ret = -ENOMEM; 1586 goto out_free; 1587 } 1588 1589 t->dev_addr = mmc_test_capacity(test->card) / 2; 1590 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); 1591 1592 if (erase) { 1593 ret = mmc_test_area_erase(test); 1594 if (ret) 1595 goto out_free; 1596 } 1597 1598 if (fill) { 1599 ret = mmc_test_area_fill(test); 1600 if (ret) 1601 goto out_free; 1602 } 1603 1604 return 0; 1605 1606 out_free: 1607 mmc_test_area_cleanup(test); 1608 return ret; 1609 } 1610 1611 /* 1612 * Prepare for large transfers. Do not erase the test area. 1613 */ 1614 static int mmc_test_area_prepare(struct mmc_test_card *test) 1615 { 1616 return mmc_test_area_init(test, 0, 0); 1617 } 1618 1619 /* 1620 * Prepare for large transfers. Do erase the test area. 1621 */ 1622 static int mmc_test_area_prepare_erase(struct mmc_test_card *test) 1623 { 1624 return mmc_test_area_init(test, 1, 0); 1625 } 1626 1627 /* 1628 * Prepare for large transfers. Erase and fill the test area. 1629 */ 1630 static int mmc_test_area_prepare_fill(struct mmc_test_card *test) 1631 { 1632 return mmc_test_area_init(test, 1, 1); 1633 } 1634 1635 /* 1636 * Test best-case performance. Best-case performance is expected from 1637 * a single large transfer. 1638 * 1639 * An additional option (max_scatter) allows the measurement of the same 1640 * transfer but with no contiguous pages in the scatter list. This tests 1641 * the efficiency of DMA to handle scattered pages. 1642 */ 1643 static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1644 int max_scatter) 1645 { 1646 struct mmc_test_area *t = &test->area; 1647 1648 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, 1649 max_scatter, 1); 1650 } 1651 1652 /* 1653 * Best-case read performance. 1654 */ 1655 static int mmc_test_best_read_performance(struct mmc_test_card *test) 1656 { 1657 return mmc_test_best_performance(test, 0, 0); 1658 } 1659 1660 /* 1661 * Best-case write performance. 1662 */ 1663 static int mmc_test_best_write_performance(struct mmc_test_card *test) 1664 { 1665 return mmc_test_best_performance(test, 1, 0); 1666 } 1667 1668 /* 1669 * Best-case read performance into scattered pages. 1670 */ 1671 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) 1672 { 1673 return mmc_test_best_performance(test, 0, 1); 1674 } 1675 1676 /* 1677 * Best-case write performance from scattered pages. 1678 */ 1679 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) 1680 { 1681 return mmc_test_best_performance(test, 1, 1); 1682 } 1683 1684 /* 1685 * Single read performance by transfer size. 1686 */ 1687 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1688 { 1689 struct mmc_test_area *t = &test->area; 1690 unsigned long sz; 1691 unsigned int dev_addr; 1692 int ret; 1693 1694 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1695 dev_addr = t->dev_addr + (sz >> 9); 1696 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1697 if (ret) 1698 return ret; 1699 } 1700 sz = t->max_tfr; 1701 dev_addr = t->dev_addr; 1702 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1703 } 1704 1705 /* 1706 * Single write performance by transfer size. 1707 */ 1708 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1709 { 1710 struct mmc_test_area *t = &test->area; 1711 unsigned long sz; 1712 unsigned int dev_addr; 1713 int ret; 1714 1715 ret = mmc_test_area_erase(test); 1716 if (ret) 1717 return ret; 1718 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1719 dev_addr = t->dev_addr + (sz >> 9); 1720 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1721 if (ret) 1722 return ret; 1723 } 1724 ret = mmc_test_area_erase(test); 1725 if (ret) 1726 return ret; 1727 sz = t->max_tfr; 1728 dev_addr = t->dev_addr; 1729 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1730 } 1731 1732 /* 1733 * Single trim performance by transfer size. 1734 */ 1735 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1736 { 1737 struct mmc_test_area *t = &test->area; 1738 unsigned long sz; 1739 unsigned int dev_addr; 1740 struct timespec64 ts1, ts2; 1741 int ret; 1742 1743 if (!mmc_card_can_trim(test->card)) 1744 return RESULT_UNSUP_CARD; 1745 1746 if (!mmc_card_can_erase(test->card)) 1747 return RESULT_UNSUP_HOST; 1748 1749 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1750 dev_addr = t->dev_addr + (sz >> 9); 1751 ktime_get_ts64(&ts1); 1752 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1753 if (ret) 1754 return ret; 1755 ktime_get_ts64(&ts2); 1756 mmc_test_print_rate(test, sz, &ts1, &ts2); 1757 } 1758 dev_addr = t->dev_addr; 1759 ktime_get_ts64(&ts1); 1760 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1761 if (ret) 1762 return ret; 1763 ktime_get_ts64(&ts2); 1764 mmc_test_print_rate(test, sz, &ts1, &ts2); 1765 return 0; 1766 } 1767 1768 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1769 { 1770 struct mmc_test_area *t = &test->area; 1771 unsigned int dev_addr, i, cnt; 1772 struct timespec64 ts1, ts2; 1773 int ret; 1774 1775 cnt = t->max_sz / sz; 1776 dev_addr = t->dev_addr; 1777 ktime_get_ts64(&ts1); 1778 for (i = 0; i < cnt; i++) { 1779 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1780 if (ret) 1781 return ret; 1782 dev_addr += (sz >> 9); 1783 } 1784 ktime_get_ts64(&ts2); 1785 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1786 return 0; 1787 } 1788 1789 /* 1790 * Consecutive read performance by transfer size. 1791 */ 1792 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1793 { 1794 struct mmc_test_area *t = &test->area; 1795 unsigned long sz; 1796 int ret; 1797 1798 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1799 ret = mmc_test_seq_read_perf(test, sz); 1800 if (ret) 1801 return ret; 1802 } 1803 sz = t->max_tfr; 1804 return mmc_test_seq_read_perf(test, sz); 1805 } 1806 1807 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1808 { 1809 struct mmc_test_area *t = &test->area; 1810 unsigned int dev_addr, i, cnt; 1811 struct timespec64 ts1, ts2; 1812 int ret; 1813 1814 ret = mmc_test_area_erase(test); 1815 if (ret) 1816 return ret; 1817 cnt = t->max_sz / sz; 1818 dev_addr = t->dev_addr; 1819 ktime_get_ts64(&ts1); 1820 for (i = 0; i < cnt; i++) { 1821 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1822 if (ret) 1823 return ret; 1824 dev_addr += (sz >> 9); 1825 } 1826 ktime_get_ts64(&ts2); 1827 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1828 return 0; 1829 } 1830 1831 /* 1832 * Consecutive write performance by transfer size. 1833 */ 1834 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1835 { 1836 struct mmc_test_area *t = &test->area; 1837 unsigned long sz; 1838 int ret; 1839 1840 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1841 ret = mmc_test_seq_write_perf(test, sz); 1842 if (ret) 1843 return ret; 1844 } 1845 sz = t->max_tfr; 1846 return mmc_test_seq_write_perf(test, sz); 1847 } 1848 1849 /* 1850 * Consecutive trim performance by transfer size. 1851 */ 1852 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1853 { 1854 struct mmc_test_area *t = &test->area; 1855 unsigned long sz; 1856 unsigned int dev_addr, i, cnt; 1857 struct timespec64 ts1, ts2; 1858 int ret; 1859 1860 if (!mmc_card_can_trim(test->card)) 1861 return RESULT_UNSUP_CARD; 1862 1863 if (!mmc_card_can_erase(test->card)) 1864 return RESULT_UNSUP_HOST; 1865 1866 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1867 ret = mmc_test_area_erase(test); 1868 if (ret) 1869 return ret; 1870 ret = mmc_test_area_fill(test); 1871 if (ret) 1872 return ret; 1873 cnt = t->max_sz / sz; 1874 dev_addr = t->dev_addr; 1875 ktime_get_ts64(&ts1); 1876 for (i = 0; i < cnt; i++) { 1877 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1878 MMC_TRIM_ARG); 1879 if (ret) 1880 return ret; 1881 dev_addr += (sz >> 9); 1882 } 1883 ktime_get_ts64(&ts2); 1884 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1885 } 1886 return 0; 1887 } 1888 1889 static unsigned int rnd_next = 1; 1890 1891 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1892 { 1893 uint64_t r; 1894 1895 rnd_next = rnd_next * 1103515245 + 12345; 1896 r = (rnd_next >> 16) & 0x7fff; 1897 return (r * rnd_cnt) >> 15; 1898 } 1899 1900 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1901 unsigned long sz, int secs, int force_retuning) 1902 { 1903 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1904 unsigned int ssz; 1905 struct timespec64 ts1, ts2, ts; 1906 int ret; 1907 1908 ssz = sz >> 9; 1909 1910 rnd_addr = mmc_test_capacity(test->card) / 4; 1911 range1 = rnd_addr / test->card->pref_erase; 1912 range2 = range1 / ssz; 1913 1914 ktime_get_ts64(&ts1); 1915 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1916 ktime_get_ts64(&ts2); 1917 ts = timespec64_sub(ts2, ts1); 1918 if (ts.tv_sec >= secs) 1919 break; 1920 ea = mmc_test_rnd_num(range1); 1921 if (ea == last_ea) 1922 ea -= 1; 1923 last_ea = ea; 1924 dev_addr = rnd_addr + test->card->pref_erase * ea + 1925 ssz * mmc_test_rnd_num(range2); 1926 if (force_retuning) 1927 mmc_retune_needed(test->card->host); 1928 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1929 if (ret) 1930 return ret; 1931 } 1932 if (print) 1933 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1934 return 0; 1935 } 1936 1937 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1938 { 1939 struct mmc_test_area *t = &test->area; 1940 unsigned int next; 1941 unsigned long sz; 1942 int ret; 1943 1944 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1945 /* 1946 * When writing, try to get more consistent results by running 1947 * the test twice with exactly the same I/O but outputting the 1948 * results only for the 2nd run. 1949 */ 1950 if (write) { 1951 next = rnd_next; 1952 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1953 if (ret) 1954 return ret; 1955 rnd_next = next; 1956 } 1957 ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1958 if (ret) 1959 return ret; 1960 } 1961 sz = t->max_tfr; 1962 if (write) { 1963 next = rnd_next; 1964 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1965 if (ret) 1966 return ret; 1967 rnd_next = next; 1968 } 1969 return mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1970 } 1971 1972 static int mmc_test_retuning(struct mmc_test_card *test) 1973 { 1974 if (!mmc_can_retune(test->card->host)) { 1975 pr_info("%s: No retuning - test skipped\n", 1976 mmc_hostname(test->card->host)); 1977 return RESULT_UNSUP_HOST; 1978 } 1979 1980 return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1); 1981 } 1982 1983 /* 1984 * Random read performance by transfer size. 1985 */ 1986 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1987 { 1988 return mmc_test_random_perf(test, 0); 1989 } 1990 1991 /* 1992 * Random write performance by transfer size. 1993 */ 1994 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1995 { 1996 return mmc_test_random_perf(test, 1); 1997 } 1998 1999 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 2000 unsigned int tot_sz, int max_scatter) 2001 { 2002 struct mmc_test_area *t = &test->area; 2003 unsigned int dev_addr, i, cnt, sz, ssz; 2004 struct timespec64 ts1, ts2; 2005 int ret; 2006 2007 sz = t->max_tfr; 2008 2009 /* 2010 * In the case of a maximally scattered transfer, the maximum transfer 2011 * size is further limited by using PAGE_SIZE segments. 2012 */ 2013 if (max_scatter) { 2014 unsigned long max_tfr; 2015 2016 if (t->max_seg_sz >= PAGE_SIZE) 2017 max_tfr = t->max_segs * PAGE_SIZE; 2018 else 2019 max_tfr = t->max_segs * t->max_seg_sz; 2020 if (sz > max_tfr) 2021 sz = max_tfr; 2022 } 2023 2024 ssz = sz >> 9; 2025 dev_addr = mmc_test_capacity(test->card) / 4; 2026 if (tot_sz > dev_addr << 9) 2027 tot_sz = dev_addr << 9; 2028 cnt = tot_sz / sz; 2029 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2030 2031 ktime_get_ts64(&ts1); 2032 for (i = 0; i < cnt; i++) { 2033 ret = mmc_test_area_io(test, sz, dev_addr, write, 2034 max_scatter, 0); 2035 if (ret) 2036 return ret; 2037 dev_addr += ssz; 2038 } 2039 ktime_get_ts64(&ts2); 2040 2041 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2042 2043 return 0; 2044 } 2045 2046 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2047 { 2048 int ret, i; 2049 2050 for (i = 0; i < 10; i++) { 2051 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); 2052 if (ret) 2053 return ret; 2054 } 2055 for (i = 0; i < 5; i++) { 2056 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); 2057 if (ret) 2058 return ret; 2059 } 2060 for (i = 0; i < 3; i++) { 2061 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); 2062 if (ret) 2063 return ret; 2064 } 2065 2066 return ret; 2067 } 2068 2069 /* 2070 * Large sequential read performance. 2071 */ 2072 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2073 { 2074 return mmc_test_large_seq_perf(test, 0); 2075 } 2076 2077 /* 2078 * Large sequential write performance. 2079 */ 2080 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2081 { 2082 return mmc_test_large_seq_perf(test, 1); 2083 } 2084 2085 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2086 struct mmc_test_multiple_rw *tdata, 2087 unsigned int reqsize, unsigned int size, 2088 int min_sg_len) 2089 { 2090 unsigned int dev_addr; 2091 struct mmc_test_area *t = &test->area; 2092 int ret = 0; 2093 2094 /* Set up test area */ 2095 if (size > mmc_test_capacity(test->card) / 2 * 512) 2096 size = mmc_test_capacity(test->card) / 2 * 512; 2097 if (reqsize > t->max_tfr) 2098 reqsize = t->max_tfr; 2099 dev_addr = mmc_test_capacity(test->card) / 4; 2100 if ((dev_addr & 0xffff0000)) 2101 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2102 else 2103 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2104 if (!dev_addr) 2105 goto err; 2106 2107 if (reqsize > size) 2108 return 0; 2109 2110 /* prepare test area */ 2111 if (mmc_card_can_erase(test->card) && 2112 tdata->prepare & MMC_TEST_PREP_ERASE) { 2113 ret = mmc_erase(test->card, dev_addr, 2114 size / 512, test->card->erase_arg); 2115 if (ret) 2116 ret = mmc_erase(test->card, dev_addr, 2117 size / 512, MMC_ERASE_ARG); 2118 if (ret) 2119 goto err; 2120 } 2121 2122 /* Run test */ 2123 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2124 tdata->do_write, 0, 1, size / reqsize, 2125 tdata->do_nonblock_req, min_sg_len); 2126 if (ret) 2127 goto err; 2128 2129 return ret; 2130 err: 2131 pr_info("[%s] error\n", __func__); 2132 return ret; 2133 } 2134 2135 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2136 struct mmc_test_multiple_rw *rw) 2137 { 2138 int ret = 0; 2139 int i; 2140 void *pre_req = test->card->host->ops->pre_req; 2141 void *post_req = test->card->host->ops->post_req; 2142 2143 if (rw->do_nonblock_req && 2144 ((!pre_req && post_req) || (pre_req && !post_req))) { 2145 pr_info("error: only one of pre/post is defined\n"); 2146 return -EINVAL; 2147 } 2148 2149 for (i = 0 ; i < rw->len && ret == 0; i++) { 2150 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2151 if (ret) 2152 break; 2153 } 2154 return ret; 2155 } 2156 2157 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2158 struct mmc_test_multiple_rw *rw) 2159 { 2160 int ret = 0; 2161 int i; 2162 2163 for (i = 0 ; i < rw->len && ret == 0; i++) { 2164 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, 2165 rw->sg_len[i]); 2166 if (ret) 2167 break; 2168 } 2169 return ret; 2170 } 2171 2172 /* 2173 * Multiple blocking write 4k to 4 MB chunks 2174 */ 2175 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2176 { 2177 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2178 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2179 struct mmc_test_multiple_rw test_data = { 2180 .bs = bs, 2181 .size = TEST_AREA_MAX_SIZE, 2182 .len = ARRAY_SIZE(bs), 2183 .do_write = true, 2184 .do_nonblock_req = false, 2185 .prepare = MMC_TEST_PREP_ERASE, 2186 }; 2187 2188 return mmc_test_rw_multiple_size(test, &test_data); 2189 }; 2190 2191 /* 2192 * Multiple non-blocking write 4k to 4 MB chunks 2193 */ 2194 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2195 { 2196 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2197 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2198 struct mmc_test_multiple_rw test_data = { 2199 .bs = bs, 2200 .size = TEST_AREA_MAX_SIZE, 2201 .len = ARRAY_SIZE(bs), 2202 .do_write = true, 2203 .do_nonblock_req = true, 2204 .prepare = MMC_TEST_PREP_ERASE, 2205 }; 2206 2207 return mmc_test_rw_multiple_size(test, &test_data); 2208 } 2209 2210 /* 2211 * Multiple blocking read 4k to 4 MB chunks 2212 */ 2213 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2214 { 2215 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2216 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2217 struct mmc_test_multiple_rw test_data = { 2218 .bs = bs, 2219 .size = TEST_AREA_MAX_SIZE, 2220 .len = ARRAY_SIZE(bs), 2221 .do_write = false, 2222 .do_nonblock_req = false, 2223 .prepare = MMC_TEST_PREP_NONE, 2224 }; 2225 2226 return mmc_test_rw_multiple_size(test, &test_data); 2227 } 2228 2229 /* 2230 * Multiple non-blocking read 4k to 4 MB chunks 2231 */ 2232 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2233 { 2234 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2235 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2236 struct mmc_test_multiple_rw test_data = { 2237 .bs = bs, 2238 .size = TEST_AREA_MAX_SIZE, 2239 .len = ARRAY_SIZE(bs), 2240 .do_write = false, 2241 .do_nonblock_req = true, 2242 .prepare = MMC_TEST_PREP_NONE, 2243 }; 2244 2245 return mmc_test_rw_multiple_size(test, &test_data); 2246 } 2247 2248 /* 2249 * Multiple blocking write 1 to 512 sg elements 2250 */ 2251 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2252 { 2253 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2254 1 << 7, 1 << 8, 1 << 9}; 2255 struct mmc_test_multiple_rw test_data = { 2256 .sg_len = sg_len, 2257 .size = TEST_AREA_MAX_SIZE, 2258 .len = ARRAY_SIZE(sg_len), 2259 .do_write = true, 2260 .do_nonblock_req = false, 2261 .prepare = MMC_TEST_PREP_ERASE, 2262 }; 2263 2264 return mmc_test_rw_multiple_sg_len(test, &test_data); 2265 }; 2266 2267 /* 2268 * Multiple non-blocking write 1 to 512 sg elements 2269 */ 2270 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2271 { 2272 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2273 1 << 7, 1 << 8, 1 << 9}; 2274 struct mmc_test_multiple_rw test_data = { 2275 .sg_len = sg_len, 2276 .size = TEST_AREA_MAX_SIZE, 2277 .len = ARRAY_SIZE(sg_len), 2278 .do_write = true, 2279 .do_nonblock_req = true, 2280 .prepare = MMC_TEST_PREP_ERASE, 2281 }; 2282 2283 return mmc_test_rw_multiple_sg_len(test, &test_data); 2284 } 2285 2286 /* 2287 * Multiple blocking read 1 to 512 sg elements 2288 */ 2289 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2290 { 2291 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2292 1 << 7, 1 << 8, 1 << 9}; 2293 struct mmc_test_multiple_rw test_data = { 2294 .sg_len = sg_len, 2295 .size = TEST_AREA_MAX_SIZE, 2296 .len = ARRAY_SIZE(sg_len), 2297 .do_write = false, 2298 .do_nonblock_req = false, 2299 .prepare = MMC_TEST_PREP_NONE, 2300 }; 2301 2302 return mmc_test_rw_multiple_sg_len(test, &test_data); 2303 } 2304 2305 /* 2306 * Multiple non-blocking read 1 to 512 sg elements 2307 */ 2308 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2309 { 2310 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2311 1 << 7, 1 << 8, 1 << 9}; 2312 struct mmc_test_multiple_rw test_data = { 2313 .sg_len = sg_len, 2314 .size = TEST_AREA_MAX_SIZE, 2315 .len = ARRAY_SIZE(sg_len), 2316 .do_write = false, 2317 .do_nonblock_req = true, 2318 .prepare = MMC_TEST_PREP_NONE, 2319 }; 2320 2321 return mmc_test_rw_multiple_sg_len(test, &test_data); 2322 } 2323 2324 /* 2325 * eMMC hardware reset. 2326 */ 2327 static int mmc_test_reset(struct mmc_test_card *test) 2328 { 2329 struct mmc_card *card = test->card; 2330 int err; 2331 2332 err = mmc_hw_reset(card); 2333 if (!err) { 2334 /* 2335 * Reset will re-enable the card's command queue, but tests 2336 * expect it to be disabled. 2337 */ 2338 if (card->ext_csd.cmdq_en) 2339 mmc_cmdq_disable(card); 2340 return RESULT_OK; 2341 } else if (err == -EOPNOTSUPP) { 2342 return RESULT_UNSUP_HOST; 2343 } 2344 2345 return RESULT_FAIL; 2346 } 2347 2348 static int mmc_test_send_status(struct mmc_test_card *test, 2349 struct mmc_command *cmd) 2350 { 2351 memset(cmd, 0, sizeof(*cmd)); 2352 2353 cmd->opcode = MMC_SEND_STATUS; 2354 if (!mmc_host_is_spi(test->card->host)) 2355 cmd->arg = test->card->rca << 16; 2356 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2357 2358 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2359 } 2360 2361 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2362 unsigned int dev_addr, int use_sbc, 2363 int repeat_cmd, int write, int use_areq) 2364 { 2365 struct mmc_test_req *rq = mmc_test_req_alloc(); 2366 struct mmc_host *host = test->card->host; 2367 struct mmc_test_area *t = &test->area; 2368 struct mmc_request *mrq; 2369 unsigned long timeout; 2370 bool expired = false; 2371 int ret = 0, cmd_ret; 2372 u32 status = 0; 2373 int count = 0; 2374 2375 if (!rq) 2376 return -ENOMEM; 2377 2378 mrq = &rq->mrq; 2379 if (use_sbc) 2380 mrq->sbc = &rq->sbc; 2381 mrq->cap_cmd_during_tfr = true; 2382 2383 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2384 512, write); 2385 2386 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2387 ret = mmc_host_can_cmd23(host) ? 2388 RESULT_UNSUP_CARD : 2389 RESULT_UNSUP_HOST; 2390 goto out_free; 2391 } 2392 2393 /* Start ongoing data request */ 2394 if (use_areq) { 2395 ret = mmc_test_start_areq(test, mrq, NULL); 2396 if (ret) 2397 goto out_free; 2398 } else { 2399 mmc_wait_for_req(host, mrq); 2400 } 2401 2402 timeout = jiffies + msecs_to_jiffies(3000); 2403 do { 2404 count += 1; 2405 2406 /* Send status command while data transfer in progress */ 2407 cmd_ret = mmc_test_send_status(test, &rq->status); 2408 if (cmd_ret) 2409 break; 2410 2411 status = rq->status.resp[0]; 2412 if (status & R1_ERROR) { 2413 cmd_ret = -EIO; 2414 break; 2415 } 2416 2417 if (mmc_is_req_done(host, mrq)) 2418 break; 2419 2420 expired = time_after(jiffies, timeout); 2421 if (expired) { 2422 pr_info("%s: timeout waiting for Tran state status %#x\n", 2423 mmc_hostname(host), status); 2424 cmd_ret = -ETIMEDOUT; 2425 break; 2426 } 2427 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2428 2429 /* Wait for data request to complete */ 2430 if (use_areq) { 2431 ret = mmc_test_start_areq(test, NULL, mrq); 2432 } else { 2433 mmc_wait_for_req_done(test->card->host, mrq); 2434 } 2435 2436 /* 2437 * For cap_cmd_during_tfr request, upper layer must send stop if 2438 * required. 2439 */ 2440 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2441 if (ret) 2442 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2443 else 2444 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2445 } 2446 2447 if (ret) 2448 goto out_free; 2449 2450 if (cmd_ret) { 2451 pr_info("%s: Send Status failed: status %#x, error %d\n", 2452 mmc_hostname(test->card->host), status, cmd_ret); 2453 } 2454 2455 ret = mmc_test_check_result(test, mrq); 2456 if (ret) 2457 goto out_free; 2458 2459 ret = mmc_test_wait_busy(test); 2460 if (ret) 2461 goto out_free; 2462 2463 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) 2464 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2465 mmc_hostname(test->card->host), count, t->blocks); 2466 2467 if (cmd_ret) 2468 ret = cmd_ret; 2469 out_free: 2470 kfree(rq); 2471 2472 return ret; 2473 } 2474 2475 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2476 unsigned long sz, int use_sbc, int write, 2477 int use_areq) 2478 { 2479 struct mmc_test_area *t = &test->area; 2480 int ret; 2481 2482 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2483 return RESULT_UNSUP_HOST; 2484 2485 ret = mmc_test_area_map(test, sz, 0, 0, use_areq); 2486 if (ret) 2487 return ret; 2488 2489 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2490 use_areq); 2491 if (ret) 2492 return ret; 2493 2494 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2495 use_areq); 2496 } 2497 2498 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2499 int write, int use_areq) 2500 { 2501 struct mmc_test_area *t = &test->area; 2502 unsigned long sz; 2503 int ret; 2504 2505 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2506 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2507 use_areq); 2508 if (ret) 2509 return ret; 2510 } 2511 return 0; 2512 } 2513 2514 /* 2515 * Commands during read - no Set Block Count (CMD23). 2516 */ 2517 static int mmc_test_cmds_during_read(struct mmc_test_card *test) 2518 { 2519 return mmc_test_cmds_during_tfr(test, 0, 0, 0); 2520 } 2521 2522 /* 2523 * Commands during write - no Set Block Count (CMD23). 2524 */ 2525 static int mmc_test_cmds_during_write(struct mmc_test_card *test) 2526 { 2527 return mmc_test_cmds_during_tfr(test, 0, 1, 0); 2528 } 2529 2530 /* 2531 * Commands during read - use Set Block Count (CMD23). 2532 */ 2533 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test) 2534 { 2535 return mmc_test_cmds_during_tfr(test, 1, 0, 0); 2536 } 2537 2538 /* 2539 * Commands during write - use Set Block Count (CMD23). 2540 */ 2541 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test) 2542 { 2543 return mmc_test_cmds_during_tfr(test, 1, 1, 0); 2544 } 2545 2546 /* 2547 * Commands during non-blocking read - use Set Block Count (CMD23). 2548 */ 2549 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test) 2550 { 2551 return mmc_test_cmds_during_tfr(test, 1, 0, 1); 2552 } 2553 2554 /* 2555 * Commands during non-blocking write - use Set Block Count (CMD23). 2556 */ 2557 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test) 2558 { 2559 return mmc_test_cmds_during_tfr(test, 1, 1, 1); 2560 } 2561 2562 static const struct mmc_test_case mmc_test_cases[] = { 2563 { 2564 .name = "Basic write (no data verification)", 2565 .run = mmc_test_basic_write, 2566 }, 2567 2568 { 2569 .name = "Basic read (no data verification)", 2570 .run = mmc_test_basic_read, 2571 }, 2572 2573 { 2574 .name = "Basic write (with data verification)", 2575 .prepare = mmc_test_prepare_write, 2576 .run = mmc_test_verify_write, 2577 .cleanup = mmc_test_cleanup, 2578 }, 2579 2580 { 2581 .name = "Basic read (with data verification)", 2582 .prepare = mmc_test_prepare_read, 2583 .run = mmc_test_verify_read, 2584 .cleanup = mmc_test_cleanup, 2585 }, 2586 2587 { 2588 .name = "Multi-block write", 2589 .prepare = mmc_test_prepare_write, 2590 .run = mmc_test_multi_write, 2591 .cleanup = mmc_test_cleanup, 2592 }, 2593 2594 { 2595 .name = "Multi-block read", 2596 .prepare = mmc_test_prepare_read, 2597 .run = mmc_test_multi_read, 2598 .cleanup = mmc_test_cleanup, 2599 }, 2600 2601 { 2602 .name = "Power of two block writes", 2603 .prepare = mmc_test_prepare_write, 2604 .run = mmc_test_pow2_write, 2605 .cleanup = mmc_test_cleanup, 2606 }, 2607 2608 { 2609 .name = "Power of two block reads", 2610 .prepare = mmc_test_prepare_read, 2611 .run = mmc_test_pow2_read, 2612 .cleanup = mmc_test_cleanup, 2613 }, 2614 2615 { 2616 .name = "Weird sized block writes", 2617 .prepare = mmc_test_prepare_write, 2618 .run = mmc_test_weird_write, 2619 .cleanup = mmc_test_cleanup, 2620 }, 2621 2622 { 2623 .name = "Weird sized block reads", 2624 .prepare = mmc_test_prepare_read, 2625 .run = mmc_test_weird_read, 2626 .cleanup = mmc_test_cleanup, 2627 }, 2628 2629 { 2630 .name = "Badly aligned write", 2631 .prepare = mmc_test_prepare_write, 2632 .run = mmc_test_align_write, 2633 .cleanup = mmc_test_cleanup, 2634 }, 2635 2636 { 2637 .name = "Badly aligned read", 2638 .prepare = mmc_test_prepare_read, 2639 .run = mmc_test_align_read, 2640 .cleanup = mmc_test_cleanup, 2641 }, 2642 2643 { 2644 .name = "Badly aligned multi-block write", 2645 .prepare = mmc_test_prepare_write, 2646 .run = mmc_test_align_multi_write, 2647 .cleanup = mmc_test_cleanup, 2648 }, 2649 2650 { 2651 .name = "Badly aligned multi-block read", 2652 .prepare = mmc_test_prepare_read, 2653 .run = mmc_test_align_multi_read, 2654 .cleanup = mmc_test_cleanup, 2655 }, 2656 2657 { 2658 .name = "Proper xfer_size at write (start failure)", 2659 .run = mmc_test_xfersize_write, 2660 }, 2661 2662 { 2663 .name = "Proper xfer_size at read (start failure)", 2664 .run = mmc_test_xfersize_read, 2665 }, 2666 2667 { 2668 .name = "Proper xfer_size at write (midway failure)", 2669 .run = mmc_test_multi_xfersize_write, 2670 }, 2671 2672 { 2673 .name = "Proper xfer_size at read (midway failure)", 2674 .run = mmc_test_multi_xfersize_read, 2675 }, 2676 2677 #ifdef CONFIG_HIGHMEM 2678 2679 { 2680 .name = "Highmem write", 2681 .prepare = mmc_test_prepare_write, 2682 .run = mmc_test_write_high, 2683 .cleanup = mmc_test_cleanup, 2684 }, 2685 2686 { 2687 .name = "Highmem read", 2688 .prepare = mmc_test_prepare_read, 2689 .run = mmc_test_read_high, 2690 .cleanup = mmc_test_cleanup, 2691 }, 2692 2693 { 2694 .name = "Multi-block highmem write", 2695 .prepare = mmc_test_prepare_write, 2696 .run = mmc_test_multi_write_high, 2697 .cleanup = mmc_test_cleanup, 2698 }, 2699 2700 { 2701 .name = "Multi-block highmem read", 2702 .prepare = mmc_test_prepare_read, 2703 .run = mmc_test_multi_read_high, 2704 .cleanup = mmc_test_cleanup, 2705 }, 2706 2707 #else 2708 2709 { 2710 .name = "Highmem write", 2711 .run = mmc_test_no_highmem, 2712 }, 2713 2714 { 2715 .name = "Highmem read", 2716 .run = mmc_test_no_highmem, 2717 }, 2718 2719 { 2720 .name = "Multi-block highmem write", 2721 .run = mmc_test_no_highmem, 2722 }, 2723 2724 { 2725 .name = "Multi-block highmem read", 2726 .run = mmc_test_no_highmem, 2727 }, 2728 2729 #endif /* CONFIG_HIGHMEM */ 2730 2731 { 2732 .name = "Best-case read performance", 2733 .prepare = mmc_test_area_prepare_fill, 2734 .run = mmc_test_best_read_performance, 2735 .cleanup = mmc_test_area_cleanup, 2736 }, 2737 2738 { 2739 .name = "Best-case write performance", 2740 .prepare = mmc_test_area_prepare_erase, 2741 .run = mmc_test_best_write_performance, 2742 .cleanup = mmc_test_area_cleanup, 2743 }, 2744 2745 { 2746 .name = "Best-case read performance into scattered pages", 2747 .prepare = mmc_test_area_prepare_fill, 2748 .run = mmc_test_best_read_perf_max_scatter, 2749 .cleanup = mmc_test_area_cleanup, 2750 }, 2751 2752 { 2753 .name = "Best-case write performance from scattered pages", 2754 .prepare = mmc_test_area_prepare_erase, 2755 .run = mmc_test_best_write_perf_max_scatter, 2756 .cleanup = mmc_test_area_cleanup, 2757 }, 2758 2759 { 2760 .name = "Single read performance by transfer size", 2761 .prepare = mmc_test_area_prepare_fill, 2762 .run = mmc_test_profile_read_perf, 2763 .cleanup = mmc_test_area_cleanup, 2764 }, 2765 2766 { 2767 .name = "Single write performance by transfer size", 2768 .prepare = mmc_test_area_prepare, 2769 .run = mmc_test_profile_write_perf, 2770 .cleanup = mmc_test_area_cleanup, 2771 }, 2772 2773 { 2774 .name = "Single trim performance by transfer size", 2775 .prepare = mmc_test_area_prepare_fill, 2776 .run = mmc_test_profile_trim_perf, 2777 .cleanup = mmc_test_area_cleanup, 2778 }, 2779 2780 { 2781 .name = "Consecutive read performance by transfer size", 2782 .prepare = mmc_test_area_prepare_fill, 2783 .run = mmc_test_profile_seq_read_perf, 2784 .cleanup = mmc_test_area_cleanup, 2785 }, 2786 2787 { 2788 .name = "Consecutive write performance by transfer size", 2789 .prepare = mmc_test_area_prepare, 2790 .run = mmc_test_profile_seq_write_perf, 2791 .cleanup = mmc_test_area_cleanup, 2792 }, 2793 2794 { 2795 .name = "Consecutive trim performance by transfer size", 2796 .prepare = mmc_test_area_prepare, 2797 .run = mmc_test_profile_seq_trim_perf, 2798 .cleanup = mmc_test_area_cleanup, 2799 }, 2800 2801 { 2802 .name = "Random read performance by transfer size", 2803 .prepare = mmc_test_area_prepare, 2804 .run = mmc_test_random_read_perf, 2805 .cleanup = mmc_test_area_cleanup, 2806 }, 2807 2808 { 2809 .name = "Random write performance by transfer size", 2810 .prepare = mmc_test_area_prepare, 2811 .run = mmc_test_random_write_perf, 2812 .cleanup = mmc_test_area_cleanup, 2813 }, 2814 2815 { 2816 .name = "Large sequential read into scattered pages", 2817 .prepare = mmc_test_area_prepare, 2818 .run = mmc_test_large_seq_read_perf, 2819 .cleanup = mmc_test_area_cleanup, 2820 }, 2821 2822 { 2823 .name = "Large sequential write from scattered pages", 2824 .prepare = mmc_test_area_prepare, 2825 .run = mmc_test_large_seq_write_perf, 2826 .cleanup = mmc_test_area_cleanup, 2827 }, 2828 2829 { 2830 .name = "Write performance with blocking req 4k to 4MB", 2831 .prepare = mmc_test_area_prepare, 2832 .run = mmc_test_profile_mult_write_blocking_perf, 2833 .cleanup = mmc_test_area_cleanup, 2834 }, 2835 2836 { 2837 .name = "Write performance with non-blocking req 4k to 4MB", 2838 .prepare = mmc_test_area_prepare, 2839 .run = mmc_test_profile_mult_write_nonblock_perf, 2840 .cleanup = mmc_test_area_cleanup, 2841 }, 2842 2843 { 2844 .name = "Read performance with blocking req 4k to 4MB", 2845 .prepare = mmc_test_area_prepare, 2846 .run = mmc_test_profile_mult_read_blocking_perf, 2847 .cleanup = mmc_test_area_cleanup, 2848 }, 2849 2850 { 2851 .name = "Read performance with non-blocking req 4k to 4MB", 2852 .prepare = mmc_test_area_prepare, 2853 .run = mmc_test_profile_mult_read_nonblock_perf, 2854 .cleanup = mmc_test_area_cleanup, 2855 }, 2856 2857 { 2858 .name = "Write performance blocking req 1 to 512 sg elems", 2859 .prepare = mmc_test_area_prepare, 2860 .run = mmc_test_profile_sglen_wr_blocking_perf, 2861 .cleanup = mmc_test_area_cleanup, 2862 }, 2863 2864 { 2865 .name = "Write performance non-blocking req 1 to 512 sg elems", 2866 .prepare = mmc_test_area_prepare, 2867 .run = mmc_test_profile_sglen_wr_nonblock_perf, 2868 .cleanup = mmc_test_area_cleanup, 2869 }, 2870 2871 { 2872 .name = "Read performance blocking req 1 to 512 sg elems", 2873 .prepare = mmc_test_area_prepare, 2874 .run = mmc_test_profile_sglen_r_blocking_perf, 2875 .cleanup = mmc_test_area_cleanup, 2876 }, 2877 2878 { 2879 .name = "Read performance non-blocking req 1 to 512 sg elems", 2880 .prepare = mmc_test_area_prepare, 2881 .run = mmc_test_profile_sglen_r_nonblock_perf, 2882 .cleanup = mmc_test_area_cleanup, 2883 }, 2884 2885 { 2886 .name = "Reset test", 2887 .run = mmc_test_reset, 2888 }, 2889 2890 { 2891 .name = "Commands during read - no Set Block Count (CMD23)", 2892 .prepare = mmc_test_area_prepare, 2893 .run = mmc_test_cmds_during_read, 2894 .cleanup = mmc_test_area_cleanup, 2895 }, 2896 2897 { 2898 .name = "Commands during write - no Set Block Count (CMD23)", 2899 .prepare = mmc_test_area_prepare, 2900 .run = mmc_test_cmds_during_write, 2901 .cleanup = mmc_test_area_cleanup, 2902 }, 2903 2904 { 2905 .name = "Commands during read - use Set Block Count (CMD23)", 2906 .prepare = mmc_test_area_prepare, 2907 .run = mmc_test_cmds_during_read_cmd23, 2908 .cleanup = mmc_test_area_cleanup, 2909 }, 2910 2911 { 2912 .name = "Commands during write - use Set Block Count (CMD23)", 2913 .prepare = mmc_test_area_prepare, 2914 .run = mmc_test_cmds_during_write_cmd23, 2915 .cleanup = mmc_test_area_cleanup, 2916 }, 2917 2918 { 2919 .name = "Commands during non-blocking read - use Set Block Count (CMD23)", 2920 .prepare = mmc_test_area_prepare, 2921 .run = mmc_test_cmds_during_read_cmd23_nonblock, 2922 .cleanup = mmc_test_area_cleanup, 2923 }, 2924 2925 { 2926 .name = "Commands during non-blocking write - use Set Block Count (CMD23)", 2927 .prepare = mmc_test_area_prepare, 2928 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2929 .cleanup = mmc_test_area_cleanup, 2930 }, 2931 2932 { 2933 .name = "Re-tuning reliability", 2934 .prepare = mmc_test_area_prepare, 2935 .run = mmc_test_retuning, 2936 .cleanup = mmc_test_area_cleanup, 2937 }, 2938 2939 }; 2940 2941 static DEFINE_MUTEX(mmc_test_lock); 2942 2943 static LIST_HEAD(mmc_test_result); 2944 2945 static void mmc_test_run(struct mmc_test_card *test, int testcase) 2946 { 2947 int i, ret; 2948 2949 pr_info("%s: Starting tests of card %s...\n", 2950 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2951 2952 mmc_claim_host(test->card->host); 2953 2954 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) { 2955 struct mmc_test_general_result *gr; 2956 2957 if (testcase && ((i + 1) != testcase)) 2958 continue; 2959 2960 pr_info("%s: Test case %d. %s...\n", 2961 mmc_hostname(test->card->host), i + 1, 2962 mmc_test_cases[i].name); 2963 2964 if (mmc_test_cases[i].prepare) { 2965 ret = mmc_test_cases[i].prepare(test); 2966 if (ret) { 2967 pr_info("%s: Result: Prepare stage failed! (%d)\n", 2968 mmc_hostname(test->card->host), 2969 ret); 2970 continue; 2971 } 2972 } 2973 2974 gr = kzalloc(sizeof(*gr), GFP_KERNEL); 2975 if (gr) { 2976 INIT_LIST_HEAD(&gr->tr_lst); 2977 2978 /* Assign data what we know already */ 2979 gr->card = test->card; 2980 gr->testcase = i; 2981 2982 /* Append container to global one */ 2983 list_add_tail(&gr->link, &mmc_test_result); 2984 2985 /* 2986 * Save the pointer to created container in our private 2987 * structure. 2988 */ 2989 test->gr = gr; 2990 } 2991 2992 ret = mmc_test_cases[i].run(test); 2993 switch (ret) { 2994 case RESULT_OK: 2995 pr_info("%s: Result: OK\n", 2996 mmc_hostname(test->card->host)); 2997 break; 2998 case RESULT_FAIL: 2999 pr_info("%s: Result: FAILED\n", 3000 mmc_hostname(test->card->host)); 3001 break; 3002 case RESULT_UNSUP_HOST: 3003 pr_info("%s: Result: UNSUPPORTED (by host)\n", 3004 mmc_hostname(test->card->host)); 3005 break; 3006 case RESULT_UNSUP_CARD: 3007 pr_info("%s: Result: UNSUPPORTED (by card)\n", 3008 mmc_hostname(test->card->host)); 3009 break; 3010 default: 3011 pr_info("%s: Result: ERROR (%d)\n", 3012 mmc_hostname(test->card->host), ret); 3013 } 3014 3015 /* Save the result */ 3016 if (gr) 3017 gr->result = ret; 3018 3019 if (mmc_test_cases[i].cleanup) { 3020 ret = mmc_test_cases[i].cleanup(test); 3021 if (ret) { 3022 pr_info("%s: Warning: Cleanup stage failed! (%d)\n", 3023 mmc_hostname(test->card->host), 3024 ret); 3025 } 3026 } 3027 } 3028 3029 mmc_release_host(test->card->host); 3030 3031 pr_info("%s: Tests completed.\n", 3032 mmc_hostname(test->card->host)); 3033 } 3034 3035 static void mmc_test_free_result(struct mmc_card *card) 3036 { 3037 struct mmc_test_general_result *gr, *grs; 3038 3039 mutex_lock(&mmc_test_lock); 3040 3041 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3042 struct mmc_test_transfer_result *tr, *trs; 3043 3044 if (card && gr->card != card) 3045 continue; 3046 3047 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3048 list_del(&tr->link); 3049 kfree(tr); 3050 } 3051 3052 list_del(&gr->link); 3053 kfree(gr); 3054 } 3055 3056 mutex_unlock(&mmc_test_lock); 3057 } 3058 3059 static LIST_HEAD(mmc_test_file_test); 3060 3061 static int mtf_test_show(struct seq_file *sf, void *data) 3062 { 3063 struct mmc_card *card = sf->private; 3064 struct mmc_test_general_result *gr; 3065 3066 mutex_lock(&mmc_test_lock); 3067 3068 list_for_each_entry(gr, &mmc_test_result, link) { 3069 struct mmc_test_transfer_result *tr; 3070 3071 if (gr->card != card) 3072 continue; 3073 3074 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3075 3076 list_for_each_entry(tr, &gr->tr_lst, link) { 3077 seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", 3078 tr->count, tr->sectors, 3079 (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, 3080 tr->rate, tr->iops / 100, tr->iops % 100); 3081 } 3082 } 3083 3084 mutex_unlock(&mmc_test_lock); 3085 3086 return 0; 3087 } 3088 3089 static int mtf_test_open(struct inode *inode, struct file *file) 3090 { 3091 return single_open(file, mtf_test_show, inode->i_private); 3092 } 3093 3094 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3095 size_t count, loff_t *pos) 3096 { 3097 struct seq_file *sf = file->private_data; 3098 struct mmc_card *card = sf->private; 3099 struct mmc_test_card *test; 3100 long testcase; 3101 int ret; 3102 3103 ret = kstrtol_from_user(buf, count, 10, &testcase); 3104 if (ret) 3105 return ret; 3106 3107 test = kzalloc(sizeof(*test), GFP_KERNEL); 3108 if (!test) 3109 return -ENOMEM; 3110 3111 /* 3112 * Remove all test cases associated with given card. Thus we have only 3113 * actual data of the last run. 3114 */ 3115 mmc_test_free_result(card); 3116 3117 test->card = card; 3118 3119 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3120 #ifdef CONFIG_HIGHMEM 3121 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3122 if (!test->highmem) { 3123 count = -ENOMEM; 3124 goto free_test_buffer; 3125 } 3126 #endif 3127 3128 if (test->buffer) { 3129 mutex_lock(&mmc_test_lock); 3130 mmc_test_run(test, testcase); 3131 mutex_unlock(&mmc_test_lock); 3132 } 3133 3134 #ifdef CONFIG_HIGHMEM 3135 __free_pages(test->highmem, BUFFER_ORDER); 3136 free_test_buffer: 3137 #endif 3138 kfree(test->buffer); 3139 kfree(test); 3140 3141 return count; 3142 } 3143 3144 static const struct file_operations mmc_test_fops_test = { 3145 .open = mtf_test_open, 3146 .read = seq_read, 3147 .write = mtf_test_write, 3148 .llseek = seq_lseek, 3149 .release = single_release, 3150 }; 3151 3152 static int mtf_testlist_show(struct seq_file *sf, void *data) 3153 { 3154 int i; 3155 3156 mutex_lock(&mmc_test_lock); 3157 3158 seq_puts(sf, "0:\tRun all tests\n"); 3159 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) 3160 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name); 3161 3162 mutex_unlock(&mmc_test_lock); 3163 3164 return 0; 3165 } 3166 3167 DEFINE_SHOW_ATTRIBUTE(mtf_testlist); 3168 3169 static void mmc_test_free_dbgfs_file(struct mmc_card *card) 3170 { 3171 struct mmc_test_dbgfs_file *df, *dfs; 3172 3173 mutex_lock(&mmc_test_lock); 3174 3175 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { 3176 if (card && df->card != card) 3177 continue; 3178 debugfs_remove(df->file); 3179 list_del(&df->link); 3180 kfree(df); 3181 } 3182 3183 mutex_unlock(&mmc_test_lock); 3184 } 3185 3186 static int __mmc_test_register_dbgfs_file(struct mmc_card *card, 3187 const char *name, umode_t mode, const struct file_operations *fops) 3188 { 3189 struct dentry *file = NULL; 3190 struct mmc_test_dbgfs_file *df; 3191 3192 if (card->debugfs_root) 3193 file = debugfs_create_file(name, mode, card->debugfs_root, 3194 card, fops); 3195 3196 df = kmalloc(sizeof(*df), GFP_KERNEL); 3197 if (!df) { 3198 debugfs_remove(file); 3199 return -ENOMEM; 3200 } 3201 3202 df->card = card; 3203 df->file = file; 3204 3205 list_add(&df->link, &mmc_test_file_test); 3206 return 0; 3207 } 3208 3209 static int mmc_test_register_dbgfs_file(struct mmc_card *card) 3210 { 3211 int ret; 3212 3213 mutex_lock(&mmc_test_lock); 3214 3215 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, 3216 &mmc_test_fops_test); 3217 if (ret) 3218 goto err; 3219 3220 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, 3221 &mtf_testlist_fops); 3222 if (ret) 3223 goto err; 3224 3225 err: 3226 mutex_unlock(&mmc_test_lock); 3227 3228 return ret; 3229 } 3230 3231 static int mmc_test_probe(struct mmc_card *card) 3232 { 3233 int ret; 3234 3235 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3236 return -ENODEV; 3237 3238 if (mmc_card_ult_capacity(card)) { 3239 pr_info("%s: mmc-test currently UNSUPPORTED for SDUC\n", 3240 mmc_hostname(card->host)); 3241 return -EOPNOTSUPP; 3242 } 3243 3244 ret = mmc_test_register_dbgfs_file(card); 3245 if (ret) 3246 return ret; 3247 3248 if (card->ext_csd.cmdq_en) { 3249 mmc_claim_host(card->host); 3250 ret = mmc_cmdq_disable(card); 3251 mmc_release_host(card->host); 3252 if (ret) 3253 return ret; 3254 } 3255 3256 dev_info(&card->dev, "Card claimed for testing.\n"); 3257 3258 return 0; 3259 } 3260 3261 static void mmc_test_remove(struct mmc_card *card) 3262 { 3263 if (card->reenable_cmdq) { 3264 mmc_claim_host(card->host); 3265 mmc_cmdq_enable(card); 3266 mmc_release_host(card->host); 3267 } 3268 mmc_test_free_result(card); 3269 mmc_test_free_dbgfs_file(card); 3270 } 3271 3272 static struct mmc_driver mmc_driver = { 3273 .drv = { 3274 .name = "mmc_test", 3275 }, 3276 .probe = mmc_test_probe, 3277 .remove = mmc_test_remove, 3278 }; 3279 3280 static int __init mmc_test_init(void) 3281 { 3282 return mmc_register_driver(&mmc_driver); 3283 } 3284 3285 static void __exit mmc_test_exit(void) 3286 { 3287 /* Clear stalled data if card is still plugged */ 3288 mmc_test_free_result(NULL); 3289 mmc_test_free_dbgfs_file(NULL); 3290 3291 mmc_unregister_driver(&mmc_driver); 3292 } 3293 3294 module_init(mmc_test_init); 3295 module_exit(mmc_test_exit); 3296 3297 MODULE_LICENSE("GPL"); 3298 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); 3299 MODULE_AUTHOR("Pierre Ossman"); 3300