1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2007-2008 Pierre Ossman 4 */ 5 6 #include <linux/mmc/core.h> 7 #include <linux/mmc/card.h> 8 #include <linux/mmc/host.h> 9 #include <linux/mmc/mmc.h> 10 #include <linux/slab.h> 11 12 #include <linux/scatterlist.h> 13 #include <linux/list.h> 14 15 #include <linux/debugfs.h> 16 #include <linux/uaccess.h> 17 #include <linux/seq_file.h> 18 #include <linux/module.h> 19 20 #include "core.h" 21 #include "card.h" 22 #include "host.h" 23 #include "bus.h" 24 #include "mmc_ops.h" 25 26 #define RESULT_OK 0 27 #define RESULT_FAIL 1 28 #define RESULT_UNSUP_HOST 2 29 #define RESULT_UNSUP_CARD 3 30 31 #define BUFFER_ORDER 2 32 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 33 34 #define TEST_ALIGN_END 8 35 36 /* 37 * Limit the test area size to the maximum MMC HC erase group size. Note that 38 * the maximum SD allocation unit size is just 4MiB. 39 */ 40 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 41 42 /** 43 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. 44 * @page: first page in the allocation 45 * @order: order of the number of pages allocated 46 */ 47 struct mmc_test_pages { 48 struct page *page; 49 unsigned int order; 50 }; 51 52 /** 53 * struct mmc_test_mem - allocated memory. 54 * @arr: array of allocations 55 * @cnt: number of allocations 56 */ 57 struct mmc_test_mem { 58 struct mmc_test_pages *arr; 59 unsigned int cnt; 60 }; 61 62 /** 63 * struct mmc_test_area - information for performance tests. 64 * @max_sz: test area size (in bytes) 65 * @dev_addr: address on card at which to do performance tests 66 * @max_tfr: maximum transfer size allowed by driver (in bytes) 67 * @max_segs: maximum segments allowed by driver in scatterlist @sg 68 * @max_seg_sz: maximum segment size allowed by driver 69 * @blocks: number of (512 byte) blocks currently mapped by @sg 70 * @sg_len: length of currently mapped scatterlist @sg 71 * @mem: allocated memory 72 * @sg: scatterlist 73 * @sg_areq: scatterlist for non-blocking request 74 */ 75 struct mmc_test_area { 76 unsigned long max_sz; 77 unsigned int dev_addr; 78 unsigned int max_tfr; 79 unsigned int max_segs; 80 unsigned int max_seg_sz; 81 unsigned int blocks; 82 unsigned int sg_len; 83 struct mmc_test_mem *mem; 84 struct scatterlist *sg; 85 struct scatterlist *sg_areq; 86 }; 87 88 /** 89 * struct mmc_test_transfer_result - transfer results for performance tests. 90 * @link: double-linked list 91 * @count: amount of group of sectors to check 92 * @sectors: amount of sectors to check in one group 93 * @ts: time values of transfer 94 * @rate: calculated transfer rate 95 * @iops: I/O operations per second (times 100) 96 */ 97 struct mmc_test_transfer_result { 98 struct list_head link; 99 unsigned int count; 100 unsigned int sectors; 101 struct timespec64 ts; 102 unsigned int rate; 103 unsigned int iops; 104 }; 105 106 /** 107 * struct mmc_test_general_result - results for tests. 108 * @link: double-linked list 109 * @card: card under test 110 * @testcase: number of test case 111 * @result: result of test run 112 * @tr_lst: transfer measurements if any as mmc_test_transfer_result 113 */ 114 struct mmc_test_general_result { 115 struct list_head link; 116 struct mmc_card *card; 117 int testcase; 118 int result; 119 struct list_head tr_lst; 120 }; 121 122 /** 123 * struct mmc_test_dbgfs_file - debugfs related file. 124 * @link: double-linked list 125 * @card: card under test 126 * @file: file created under debugfs 127 */ 128 struct mmc_test_dbgfs_file { 129 struct list_head link; 130 struct mmc_card *card; 131 struct dentry *file; 132 }; 133 134 /** 135 * struct mmc_test_card - test information. 136 * @card: card under test 137 * @scratch: transfer buffer 138 * @buffer: transfer buffer 139 * @highmem: buffer for highmem tests 140 * @area: information for performance tests 141 * @gr: pointer to results of current testcase 142 */ 143 struct mmc_test_card { 144 struct mmc_card *card; 145 146 u8 scratch[BUFFER_SIZE]; 147 u8 *buffer; 148 #ifdef CONFIG_HIGHMEM 149 struct page *highmem; 150 #endif 151 struct mmc_test_area area; 152 struct mmc_test_general_result *gr; 153 }; 154 155 enum mmc_test_prep_media { 156 MMC_TEST_PREP_NONE = 0, 157 MMC_TEST_PREP_WRITE_FULL = 1 << 0, 158 MMC_TEST_PREP_ERASE = 1 << 1, 159 }; 160 161 struct mmc_test_multiple_rw { 162 unsigned int *sg_len; 163 unsigned int *bs; 164 unsigned int len; 165 unsigned int size; 166 bool do_write; 167 bool do_nonblock_req; 168 enum mmc_test_prep_media prepare; 169 }; 170 171 /*******************************************************************/ 172 /* General helper functions */ 173 /*******************************************************************/ 174 175 /* 176 * Configure correct block size in card 177 */ 178 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 179 { 180 return mmc_set_blocklen(test->card, size); 181 } 182 183 static void mmc_test_prepare_sbc(struct mmc_test_card *test, 184 struct mmc_request *mrq, unsigned int blocks) 185 { 186 struct mmc_card *card = test->card; 187 188 if (!mrq->sbc || !mmc_host_can_cmd23(card->host) || 189 !mmc_card_can_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || 190 mmc_card_blk_no_cmd23(card)) { 191 mrq->sbc = NULL; 192 return; 193 } 194 195 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; 196 mrq->sbc->arg = blocks; 197 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; 198 } 199 200 /* 201 * Fill in the mmc_request structure given a set of transfer parameters. 202 */ 203 static void mmc_test_prepare_mrq(struct mmc_test_card *test, 204 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 205 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 206 { 207 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) 208 return; 209 210 if (blocks > 1) { 211 mrq->cmd->opcode = write ? 212 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 213 } else { 214 mrq->cmd->opcode = write ? 215 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 216 } 217 218 mrq->cmd->arg = dev_addr; 219 if (!mmc_card_blockaddr(test->card)) 220 mrq->cmd->arg <<= 9; 221 222 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 223 224 if (blocks == 1) 225 mrq->stop = NULL; 226 else { 227 mrq->stop->opcode = MMC_STOP_TRANSMISSION; 228 mrq->stop->arg = 0; 229 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 230 } 231 232 mrq->data->blksz = blksz; 233 mrq->data->blocks = blocks; 234 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 235 mrq->data->sg = sg; 236 mrq->data->sg_len = sg_len; 237 238 mmc_test_prepare_sbc(test, mrq, blocks); 239 240 mmc_set_data_timeout(mrq->data, test->card); 241 } 242 243 static int mmc_test_busy(struct mmc_command *cmd) 244 { 245 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 246 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); 247 } 248 249 /* 250 * Wait for the card to finish the busy state 251 */ 252 static int mmc_test_wait_busy(struct mmc_test_card *test) 253 { 254 int ret, busy; 255 struct mmc_command cmd = {}; 256 257 busy = 0; 258 do { 259 memset(&cmd, 0, sizeof(struct mmc_command)); 260 261 cmd.opcode = MMC_SEND_STATUS; 262 cmd.arg = test->card->rca << 16; 263 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 264 265 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 266 if (ret) 267 break; 268 269 if (!busy && mmc_test_busy(&cmd)) { 270 busy = 1; 271 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 272 pr_info("%s: Warning: Host did not wait for busy state to end.\n", 273 mmc_hostname(test->card->host)); 274 } 275 } while (mmc_test_busy(&cmd)); 276 277 return ret; 278 } 279 280 /* 281 * Transfer a single sector of kernel addressable data 282 */ 283 static int mmc_test_buffer_transfer(struct mmc_test_card *test, 284 u8 *buffer, unsigned addr, unsigned blksz, int write) 285 { 286 struct mmc_request mrq = {}; 287 struct mmc_command cmd = {}; 288 struct mmc_command stop = {}; 289 struct mmc_data data = {}; 290 291 struct scatterlist sg; 292 293 mrq.cmd = &cmd; 294 mrq.data = &data; 295 mrq.stop = &stop; 296 297 sg_init_one(&sg, buffer, blksz); 298 299 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 300 301 mmc_wait_for_req(test->card->host, &mrq); 302 303 if (cmd.error) 304 return cmd.error; 305 if (data.error) 306 return data.error; 307 308 return mmc_test_wait_busy(test); 309 } 310 311 static void mmc_test_free_mem(struct mmc_test_mem *mem) 312 { 313 if (!mem) 314 return; 315 while (mem->cnt--) 316 __free_pages(mem->arr[mem->cnt].page, 317 mem->arr[mem->cnt].order); 318 kfree(mem->arr); 319 kfree(mem); 320 } 321 322 /* 323 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case 324 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 325 * not exceed a maximum number of segments and try not to make segments much 326 * bigger than maximum segment size. 327 */ 328 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 329 unsigned long max_sz, 330 unsigned int max_segs, 331 unsigned int max_seg_sz) 332 { 333 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 334 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 335 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); 336 unsigned long page_cnt = 0; 337 unsigned long limit = nr_free_buffer_pages() >> 4; 338 struct mmc_test_mem *mem; 339 340 if (max_page_cnt > limit) 341 max_page_cnt = limit; 342 if (min_page_cnt > max_page_cnt) 343 min_page_cnt = max_page_cnt; 344 345 if (max_seg_page_cnt > max_page_cnt) 346 max_seg_page_cnt = max_page_cnt; 347 348 if (max_segs > max_page_cnt) 349 max_segs = max_page_cnt; 350 351 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 352 if (!mem) 353 return NULL; 354 355 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); 356 if (!mem->arr) 357 goto out_free; 358 359 while (max_page_cnt) { 360 struct page *page; 361 unsigned int order; 362 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 363 __GFP_NORETRY; 364 365 order = get_order(max_seg_page_cnt << PAGE_SHIFT); 366 while (1) { 367 page = alloc_pages(flags, order); 368 if (page || !order) 369 break; 370 order -= 1; 371 } 372 if (!page) { 373 if (page_cnt < min_page_cnt) 374 goto out_free; 375 break; 376 } 377 mem->arr[mem->cnt].page = page; 378 mem->arr[mem->cnt].order = order; 379 mem->cnt += 1; 380 if (max_page_cnt <= (1UL << order)) 381 break; 382 max_page_cnt -= 1UL << order; 383 page_cnt += 1UL << order; 384 if (mem->cnt >= max_segs) { 385 if (page_cnt < min_page_cnt) 386 goto out_free; 387 break; 388 } 389 } 390 391 return mem; 392 393 out_free: 394 mmc_test_free_mem(mem); 395 return NULL; 396 } 397 398 /* 399 * Map memory into a scatterlist. Optionally allow the same memory to be 400 * mapped more than once. 401 */ 402 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, 403 struct scatterlist *sglist, int repeat, 404 unsigned int max_segs, unsigned int max_seg_sz, 405 unsigned int *sg_len, int min_sg_len) 406 { 407 struct scatterlist *sg = NULL; 408 unsigned int i; 409 unsigned long sz = size; 410 411 sg_init_table(sglist, max_segs); 412 if (min_sg_len > max_segs) 413 min_sg_len = max_segs; 414 415 *sg_len = 0; 416 do { 417 for (i = 0; i < mem->cnt; i++) { 418 unsigned long len = PAGE_SIZE << mem->arr[i].order; 419 420 if (min_sg_len && (size / min_sg_len < len)) 421 len = ALIGN(size / min_sg_len, 512); 422 if (len > sz) 423 len = sz; 424 if (len > max_seg_sz) 425 len = max_seg_sz; 426 if (sg) 427 sg = sg_next(sg); 428 else 429 sg = sglist; 430 if (!sg) 431 return -EINVAL; 432 sg_set_page(sg, mem->arr[i].page, len, 0); 433 sz -= len; 434 *sg_len += 1; 435 if (!sz) 436 break; 437 } 438 } while (sz && repeat); 439 440 if (sz) 441 return -EINVAL; 442 443 if (sg) 444 sg_mark_end(sg); 445 446 return 0; 447 } 448 449 /* 450 * Map memory into a scatterlist so that no pages are contiguous. Allow the 451 * same memory to be mapped more than once. 452 */ 453 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 454 unsigned long sz, 455 struct scatterlist *sglist, 456 unsigned int max_segs, 457 unsigned int max_seg_sz, 458 unsigned int *sg_len) 459 { 460 struct scatterlist *sg = NULL; 461 unsigned int i = mem->cnt, cnt; 462 unsigned long len; 463 void *base, *addr, *last_addr = NULL; 464 465 sg_init_table(sglist, max_segs); 466 467 *sg_len = 0; 468 while (sz) { 469 base = page_address(mem->arr[--i].page); 470 cnt = 1 << mem->arr[i].order; 471 while (sz && cnt) { 472 addr = base + PAGE_SIZE * --cnt; 473 if (last_addr && last_addr + PAGE_SIZE == addr) 474 continue; 475 last_addr = addr; 476 len = PAGE_SIZE; 477 if (len > max_seg_sz) 478 len = max_seg_sz; 479 if (len > sz) 480 len = sz; 481 if (sg) 482 sg = sg_next(sg); 483 else 484 sg = sglist; 485 if (!sg) 486 return -EINVAL; 487 sg_set_page(sg, virt_to_page(addr), len, 0); 488 sz -= len; 489 *sg_len += 1; 490 } 491 if (i == 0) 492 i = mem->cnt; 493 } 494 495 if (sg) 496 sg_mark_end(sg); 497 498 return 0; 499 } 500 501 /* 502 * Calculate transfer rate in bytes per second. 503 */ 504 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) 505 { 506 uint64_t ns; 507 508 ns = timespec64_to_ns(ts); 509 bytes *= 1000000000; 510 511 while (ns > UINT_MAX) { 512 bytes >>= 1; 513 ns >>= 1; 514 } 515 516 if (!ns) 517 return 0; 518 519 do_div(bytes, (uint32_t)ns); 520 521 return bytes; 522 } 523 524 /* 525 * Save transfer results for future usage 526 */ 527 static void mmc_test_save_transfer_result(struct mmc_test_card *test, 528 unsigned int count, unsigned int sectors, struct timespec64 ts, 529 unsigned int rate, unsigned int iops) 530 { 531 struct mmc_test_transfer_result *tr; 532 533 if (!test->gr) 534 return; 535 536 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 537 if (!tr) 538 return; 539 540 tr->count = count; 541 tr->sectors = sectors; 542 tr->ts = ts; 543 tr->rate = rate; 544 tr->iops = iops; 545 546 list_add_tail(&tr->link, &test->gr->tr_lst); 547 } 548 549 /* 550 * Print the transfer rate. 551 */ 552 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 553 struct timespec64 *ts1, struct timespec64 *ts2) 554 { 555 unsigned int rate, iops, sectors = bytes >> 9; 556 struct timespec64 ts; 557 558 ts = timespec64_sub(*ts2, *ts1); 559 560 rate = mmc_test_rate(bytes, &ts); 561 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 562 563 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u " 564 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 565 mmc_hostname(test->card->host), sectors, sectors >> 1, 566 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, 567 (u32)ts.tv_nsec, rate / 1000, rate / 1024, 568 iops / 100, iops % 100); 569 570 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); 571 } 572 573 /* 574 * Print the average transfer rate. 575 */ 576 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, 577 unsigned int count, struct timespec64 *ts1, 578 struct timespec64 *ts2) 579 { 580 unsigned int rate, iops, sectors = bytes >> 9; 581 uint64_t tot = bytes * count; 582 struct timespec64 ts; 583 584 ts = timespec64_sub(*ts2, *ts1); 585 586 rate = mmc_test_rate(tot, &ts); 587 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 588 589 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n", 590 mmc_hostname(test->card->host), count, sectors, count, 591 sectors >> 1, (sectors & 1 ? ".5" : ""), &ts, 592 rate / 1000, rate / 1024, iops / 100, iops % 100, 593 test->area.sg_len); 594 595 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 596 } 597 598 /* 599 * Return the card size in sectors. 600 */ 601 static unsigned int mmc_test_capacity(struct mmc_card *card) 602 { 603 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) 604 return card->ext_csd.sectors; 605 else 606 return card->csd.capacity << (card->csd.read_blkbits - 9); 607 } 608 609 /*******************************************************************/ 610 /* Test preparation and cleanup */ 611 /*******************************************************************/ 612 613 /* 614 * Fill the first couple of sectors of the card with known data 615 * so that bad reads/writes can be detected 616 */ 617 static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val) 618 { 619 int ret, i; 620 621 ret = mmc_test_set_blksize(test, 512); 622 if (ret) 623 return ret; 624 625 if (write) 626 memset(test->buffer, val, 512); 627 else { 628 for (i = 0; i < 512; i++) 629 test->buffer[i] = i; 630 } 631 632 for (i = 0; i < BUFFER_SIZE / 512; i++) { 633 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 634 if (ret) 635 return ret; 636 } 637 638 return 0; 639 } 640 641 static int mmc_test_prepare_write(struct mmc_test_card *test) 642 { 643 return __mmc_test_prepare(test, 1, 0xDF); 644 } 645 646 static int mmc_test_prepare_read(struct mmc_test_card *test) 647 { 648 return __mmc_test_prepare(test, 0, 0); 649 } 650 651 static int mmc_test_cleanup(struct mmc_test_card *test) 652 { 653 return __mmc_test_prepare(test, 1, 0); 654 } 655 656 /*******************************************************************/ 657 /* Test execution helpers */ 658 /*******************************************************************/ 659 660 /* 661 * Modifies the mmc_request to perform the "short transfer" tests 662 */ 663 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 664 struct mmc_request *mrq, int write) 665 { 666 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 667 return; 668 669 if (mrq->data->blocks > 1) { 670 mrq->cmd->opcode = write ? 671 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 672 mrq->stop = NULL; 673 } else { 674 mrq->cmd->opcode = MMC_SEND_STATUS; 675 mrq->cmd->arg = test->card->rca << 16; 676 } 677 } 678 679 /* 680 * Checks that a normal transfer didn't have any errors 681 */ 682 static int mmc_test_check_result(struct mmc_test_card *test, 683 struct mmc_request *mrq) 684 { 685 int ret; 686 687 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 688 return -EINVAL; 689 690 ret = 0; 691 692 if (mrq->sbc && mrq->sbc->error) 693 ret = mrq->sbc->error; 694 if (!ret && mrq->cmd->error) 695 ret = mrq->cmd->error; 696 if (!ret && mrq->data->error) 697 ret = mrq->data->error; 698 if (!ret && mrq->stop && mrq->stop->error) 699 ret = mrq->stop->error; 700 if (!ret && mrq->data->bytes_xfered != 701 mrq->data->blocks * mrq->data->blksz) 702 ret = RESULT_FAIL; 703 704 if (ret == -EINVAL) 705 ret = RESULT_UNSUP_HOST; 706 707 return ret; 708 } 709 710 /* 711 * Checks that a "short transfer" behaved as expected 712 */ 713 static int mmc_test_check_broken_result(struct mmc_test_card *test, 714 struct mmc_request *mrq) 715 { 716 int ret; 717 718 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 719 return -EINVAL; 720 721 ret = 0; 722 723 if (!ret && mrq->cmd->error) 724 ret = mrq->cmd->error; 725 if (!ret && mrq->data->error == 0) 726 ret = RESULT_FAIL; 727 if (!ret && mrq->data->error != -ETIMEDOUT) 728 ret = mrq->data->error; 729 if (!ret && mrq->stop && mrq->stop->error) 730 ret = mrq->stop->error; 731 if (mrq->data->blocks > 1) { 732 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 733 ret = RESULT_FAIL; 734 } else { 735 if (!ret && mrq->data->bytes_xfered > 0) 736 ret = RESULT_FAIL; 737 } 738 739 if (ret == -EINVAL) 740 ret = RESULT_UNSUP_HOST; 741 742 return ret; 743 } 744 745 struct mmc_test_req { 746 struct mmc_request mrq; 747 struct mmc_command sbc; 748 struct mmc_command cmd; 749 struct mmc_command stop; 750 struct mmc_command status; 751 struct mmc_data data; 752 }; 753 754 /* 755 * Tests nonblock transfer with certain parameters 756 */ 757 static void mmc_test_req_reset(struct mmc_test_req *rq) 758 { 759 memset(rq, 0, sizeof(struct mmc_test_req)); 760 761 rq->mrq.cmd = &rq->cmd; 762 rq->mrq.data = &rq->data; 763 rq->mrq.stop = &rq->stop; 764 } 765 766 static struct mmc_test_req *mmc_test_req_alloc(void) 767 { 768 struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL); 769 770 if (rq) 771 mmc_test_req_reset(rq); 772 773 return rq; 774 } 775 776 static void mmc_test_wait_done(struct mmc_request *mrq) 777 { 778 complete(&mrq->completion); 779 } 780 781 static int mmc_test_start_areq(struct mmc_test_card *test, 782 struct mmc_request *mrq, 783 struct mmc_request *prev_mrq) 784 { 785 struct mmc_host *host = test->card->host; 786 int err = 0; 787 788 if (mrq) { 789 init_completion(&mrq->completion); 790 mrq->done = mmc_test_wait_done; 791 mmc_pre_req(host, mrq); 792 } 793 794 if (prev_mrq) { 795 wait_for_completion(&prev_mrq->completion); 796 err = mmc_test_wait_busy(test); 797 if (!err) 798 err = mmc_test_check_result(test, prev_mrq); 799 } 800 801 if (!err && mrq) { 802 err = mmc_start_request(host, mrq); 803 if (err) 804 mmc_retune_release(host); 805 } 806 807 if (prev_mrq) 808 mmc_post_req(host, prev_mrq, 0); 809 810 if (err && mrq) 811 mmc_post_req(host, mrq, err); 812 813 return err; 814 } 815 816 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 817 unsigned int dev_addr, int write, 818 int count) 819 { 820 struct mmc_test_req *rq1, *rq2; 821 struct mmc_request *mrq, *prev_mrq; 822 int i; 823 int ret = RESULT_OK; 824 struct mmc_test_area *t = &test->area; 825 struct scatterlist *sg = t->sg; 826 struct scatterlist *sg_areq = t->sg_areq; 827 828 rq1 = mmc_test_req_alloc(); 829 rq2 = mmc_test_req_alloc(); 830 if (!rq1 || !rq2) { 831 ret = RESULT_FAIL; 832 goto err; 833 } 834 835 mrq = &rq1->mrq; 836 prev_mrq = NULL; 837 838 for (i = 0; i < count; i++) { 839 mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq)); 840 mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr, 841 t->blocks, 512, write); 842 ret = mmc_test_start_areq(test, mrq, prev_mrq); 843 if (ret) 844 goto err; 845 846 if (!prev_mrq) 847 prev_mrq = &rq2->mrq; 848 849 swap(mrq, prev_mrq); 850 swap(sg, sg_areq); 851 dev_addr += t->blocks; 852 } 853 854 ret = mmc_test_start_areq(test, NULL, prev_mrq); 855 err: 856 kfree(rq1); 857 kfree(rq2); 858 return ret; 859 } 860 861 /* 862 * Tests a basic transfer with certain parameters 863 */ 864 static int mmc_test_simple_transfer(struct mmc_test_card *test, 865 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 866 unsigned blocks, unsigned blksz, int write) 867 { 868 struct mmc_request mrq = {}; 869 struct mmc_command cmd = {}; 870 struct mmc_command stop = {}; 871 struct mmc_data data = {}; 872 873 mrq.cmd = &cmd; 874 mrq.data = &data; 875 mrq.stop = &stop; 876 877 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 878 blocks, blksz, write); 879 880 mmc_wait_for_req(test->card->host, &mrq); 881 882 mmc_test_wait_busy(test); 883 884 return mmc_test_check_result(test, &mrq); 885 } 886 887 /* 888 * Tests a transfer where the card will fail completely or partly 889 */ 890 static int mmc_test_broken_transfer(struct mmc_test_card *test, 891 unsigned blocks, unsigned blksz, int write) 892 { 893 struct mmc_request mrq = {}; 894 struct mmc_command cmd = {}; 895 struct mmc_command stop = {}; 896 struct mmc_data data = {}; 897 898 struct scatterlist sg; 899 900 mrq.cmd = &cmd; 901 mrq.data = &data; 902 mrq.stop = &stop; 903 904 sg_init_one(&sg, test->buffer, blocks * blksz); 905 906 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 907 mmc_test_prepare_broken_mrq(test, &mrq, write); 908 909 mmc_wait_for_req(test->card->host, &mrq); 910 911 mmc_test_wait_busy(test); 912 913 return mmc_test_check_broken_result(test, &mrq); 914 } 915 916 /* 917 * Does a complete transfer test where data is also validated 918 * 919 * Note: mmc_test_prepare() must have been done before this call 920 */ 921 static int mmc_test_transfer(struct mmc_test_card *test, 922 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 923 unsigned blocks, unsigned blksz, int write) 924 { 925 int ret, i; 926 927 if (write) { 928 for (i = 0; i < blocks * blksz; i++) 929 test->scratch[i] = i; 930 } else { 931 memset(test->scratch, 0, BUFFER_SIZE); 932 } 933 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 934 935 ret = mmc_test_set_blksize(test, blksz); 936 if (ret) 937 return ret; 938 939 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 940 blocks, blksz, write); 941 if (ret) 942 return ret; 943 944 if (write) { 945 int sectors; 946 947 ret = mmc_test_set_blksize(test, 512); 948 if (ret) 949 return ret; 950 951 sectors = (blocks * blksz + 511) / 512; 952 if ((sectors * 512) == (blocks * blksz)) 953 sectors++; 954 955 if ((sectors * 512) > BUFFER_SIZE) 956 return -EINVAL; 957 958 memset(test->buffer, 0, sectors * 512); 959 960 for (i = 0; i < sectors; i++) { 961 ret = mmc_test_buffer_transfer(test, 962 test->buffer + i * 512, 963 dev_addr + i, 512, 0); 964 if (ret) 965 return ret; 966 } 967 968 for (i = 0; i < blocks * blksz; i++) { 969 if (test->buffer[i] != (u8)i) 970 return RESULT_FAIL; 971 } 972 973 for (; i < sectors * 512; i++) { 974 if (test->buffer[i] != 0xDF) 975 return RESULT_FAIL; 976 } 977 } else { 978 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 979 for (i = 0; i < blocks * blksz; i++) { 980 if (test->scratch[i] != (u8)i) 981 return RESULT_FAIL; 982 } 983 } 984 985 return 0; 986 } 987 988 /*******************************************************************/ 989 /* Tests */ 990 /*******************************************************************/ 991 992 struct mmc_test_case { 993 const char *name; 994 995 int (*prepare)(struct mmc_test_card *); 996 int (*run)(struct mmc_test_card *); 997 int (*cleanup)(struct mmc_test_card *); 998 }; 999 1000 static int mmc_test_basic_write(struct mmc_test_card *test) 1001 { 1002 int ret; 1003 struct scatterlist sg; 1004 1005 ret = mmc_test_set_blksize(test, 512); 1006 if (ret) 1007 return ret; 1008 1009 sg_init_one(&sg, test->buffer, 512); 1010 1011 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 1012 } 1013 1014 static int mmc_test_basic_read(struct mmc_test_card *test) 1015 { 1016 int ret; 1017 struct scatterlist sg; 1018 1019 ret = mmc_test_set_blksize(test, 512); 1020 if (ret) 1021 return ret; 1022 1023 sg_init_one(&sg, test->buffer, 512); 1024 1025 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); 1026 } 1027 1028 static int mmc_test_verify_write(struct mmc_test_card *test) 1029 { 1030 struct scatterlist sg; 1031 1032 sg_init_one(&sg, test->buffer, 512); 1033 1034 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1035 } 1036 1037 static int mmc_test_verify_read(struct mmc_test_card *test) 1038 { 1039 struct scatterlist sg; 1040 1041 sg_init_one(&sg, test->buffer, 512); 1042 1043 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1044 } 1045 1046 static int mmc_test_multi_write(struct mmc_test_card *test) 1047 { 1048 unsigned int size; 1049 struct scatterlist sg; 1050 1051 if (test->card->host->max_blk_count == 1) 1052 return RESULT_UNSUP_HOST; 1053 1054 size = PAGE_SIZE * 2; 1055 size = min(size, test->card->host->max_req_size); 1056 size = min(size, test->card->host->max_seg_size); 1057 size = min(size, test->card->host->max_blk_count * 512); 1058 1059 if (size < 1024) 1060 return RESULT_UNSUP_HOST; 1061 1062 sg_init_one(&sg, test->buffer, size); 1063 1064 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1065 } 1066 1067 static int mmc_test_multi_read(struct mmc_test_card *test) 1068 { 1069 unsigned int size; 1070 struct scatterlist sg; 1071 1072 if (test->card->host->max_blk_count == 1) 1073 return RESULT_UNSUP_HOST; 1074 1075 size = PAGE_SIZE * 2; 1076 size = min(size, test->card->host->max_req_size); 1077 size = min(size, test->card->host->max_seg_size); 1078 size = min(size, test->card->host->max_blk_count * 512); 1079 1080 if (size < 1024) 1081 return RESULT_UNSUP_HOST; 1082 1083 sg_init_one(&sg, test->buffer, size); 1084 1085 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1086 } 1087 1088 static int mmc_test_pow2_write(struct mmc_test_card *test) 1089 { 1090 int ret, i; 1091 struct scatterlist sg; 1092 1093 if (!test->card->csd.write_partial) 1094 return RESULT_UNSUP_CARD; 1095 1096 for (i = 1; i < 512; i <<= 1) { 1097 sg_init_one(&sg, test->buffer, i); 1098 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1099 if (ret) 1100 return ret; 1101 } 1102 1103 return 0; 1104 } 1105 1106 static int mmc_test_pow2_read(struct mmc_test_card *test) 1107 { 1108 int ret, i; 1109 struct scatterlist sg; 1110 1111 if (!test->card->csd.read_partial) 1112 return RESULT_UNSUP_CARD; 1113 1114 for (i = 1; i < 512; i <<= 1) { 1115 sg_init_one(&sg, test->buffer, i); 1116 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1117 if (ret) 1118 return ret; 1119 } 1120 1121 return 0; 1122 } 1123 1124 static int mmc_test_weird_write(struct mmc_test_card *test) 1125 { 1126 int ret, i; 1127 struct scatterlist sg; 1128 1129 if (!test->card->csd.write_partial) 1130 return RESULT_UNSUP_CARD; 1131 1132 for (i = 3; i < 512; i += 7) { 1133 sg_init_one(&sg, test->buffer, i); 1134 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1135 if (ret) 1136 return ret; 1137 } 1138 1139 return 0; 1140 } 1141 1142 static int mmc_test_weird_read(struct mmc_test_card *test) 1143 { 1144 int ret, i; 1145 struct scatterlist sg; 1146 1147 if (!test->card->csd.read_partial) 1148 return RESULT_UNSUP_CARD; 1149 1150 for (i = 3; i < 512; i += 7) { 1151 sg_init_one(&sg, test->buffer, i); 1152 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1153 if (ret) 1154 return ret; 1155 } 1156 1157 return 0; 1158 } 1159 1160 static int mmc_test_align_write(struct mmc_test_card *test) 1161 { 1162 int ret, i; 1163 struct scatterlist sg; 1164 1165 for (i = 1; i < TEST_ALIGN_END; i++) { 1166 sg_init_one(&sg, test->buffer + i, 512); 1167 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1168 if (ret) 1169 return ret; 1170 } 1171 1172 return 0; 1173 } 1174 1175 static int mmc_test_align_read(struct mmc_test_card *test) 1176 { 1177 int ret, i; 1178 struct scatterlist sg; 1179 1180 for (i = 1; i < TEST_ALIGN_END; i++) { 1181 sg_init_one(&sg, test->buffer + i, 512); 1182 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1183 if (ret) 1184 return ret; 1185 } 1186 1187 return 0; 1188 } 1189 1190 static int mmc_test_align_multi_write(struct mmc_test_card *test) 1191 { 1192 int ret, i; 1193 unsigned int size; 1194 struct scatterlist sg; 1195 1196 if (test->card->host->max_blk_count == 1) 1197 return RESULT_UNSUP_HOST; 1198 1199 size = PAGE_SIZE * 2; 1200 size = min(size, test->card->host->max_req_size); 1201 size = min(size, test->card->host->max_seg_size); 1202 size = min(size, test->card->host->max_blk_count * 512); 1203 1204 if (size < 1024) 1205 return RESULT_UNSUP_HOST; 1206 1207 for (i = 1; i < TEST_ALIGN_END; i++) { 1208 sg_init_one(&sg, test->buffer + i, size); 1209 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1210 if (ret) 1211 return ret; 1212 } 1213 1214 return 0; 1215 } 1216 1217 static int mmc_test_align_multi_read(struct mmc_test_card *test) 1218 { 1219 int ret, i; 1220 unsigned int size; 1221 struct scatterlist sg; 1222 1223 if (test->card->host->max_blk_count == 1) 1224 return RESULT_UNSUP_HOST; 1225 1226 size = PAGE_SIZE * 2; 1227 size = min(size, test->card->host->max_req_size); 1228 size = min(size, test->card->host->max_seg_size); 1229 size = min(size, test->card->host->max_blk_count * 512); 1230 1231 if (size < 1024) 1232 return RESULT_UNSUP_HOST; 1233 1234 for (i = 1; i < TEST_ALIGN_END; i++) { 1235 sg_init_one(&sg, test->buffer + i, size); 1236 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1237 if (ret) 1238 return ret; 1239 } 1240 1241 return 0; 1242 } 1243 1244 static int mmc_test_xfersize_write(struct mmc_test_card *test) 1245 { 1246 int ret; 1247 1248 ret = mmc_test_set_blksize(test, 512); 1249 if (ret) 1250 return ret; 1251 1252 return mmc_test_broken_transfer(test, 1, 512, 1); 1253 } 1254 1255 static int mmc_test_xfersize_read(struct mmc_test_card *test) 1256 { 1257 int ret; 1258 1259 ret = mmc_test_set_blksize(test, 512); 1260 if (ret) 1261 return ret; 1262 1263 return mmc_test_broken_transfer(test, 1, 512, 0); 1264 } 1265 1266 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) 1267 { 1268 int ret; 1269 1270 if (test->card->host->max_blk_count == 1) 1271 return RESULT_UNSUP_HOST; 1272 1273 ret = mmc_test_set_blksize(test, 512); 1274 if (ret) 1275 return ret; 1276 1277 return mmc_test_broken_transfer(test, 2, 512, 1); 1278 } 1279 1280 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) 1281 { 1282 int ret; 1283 1284 if (test->card->host->max_blk_count == 1) 1285 return RESULT_UNSUP_HOST; 1286 1287 ret = mmc_test_set_blksize(test, 512); 1288 if (ret) 1289 return ret; 1290 1291 return mmc_test_broken_transfer(test, 2, 512, 0); 1292 } 1293 1294 #ifdef CONFIG_HIGHMEM 1295 1296 static int mmc_test_write_high(struct mmc_test_card *test) 1297 { 1298 struct scatterlist sg; 1299 1300 sg_init_table(&sg, 1); 1301 sg_set_page(&sg, test->highmem, 512, 0); 1302 1303 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1304 } 1305 1306 static int mmc_test_read_high(struct mmc_test_card *test) 1307 { 1308 struct scatterlist sg; 1309 1310 sg_init_table(&sg, 1); 1311 sg_set_page(&sg, test->highmem, 512, 0); 1312 1313 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1314 } 1315 1316 static int mmc_test_multi_write_high(struct mmc_test_card *test) 1317 { 1318 unsigned int size; 1319 struct scatterlist sg; 1320 1321 if (test->card->host->max_blk_count == 1) 1322 return RESULT_UNSUP_HOST; 1323 1324 size = PAGE_SIZE * 2; 1325 size = min(size, test->card->host->max_req_size); 1326 size = min(size, test->card->host->max_seg_size); 1327 size = min(size, test->card->host->max_blk_count * 512); 1328 1329 if (size < 1024) 1330 return RESULT_UNSUP_HOST; 1331 1332 sg_init_table(&sg, 1); 1333 sg_set_page(&sg, test->highmem, size, 0); 1334 1335 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1336 } 1337 1338 static int mmc_test_multi_read_high(struct mmc_test_card *test) 1339 { 1340 unsigned int size; 1341 struct scatterlist sg; 1342 1343 if (test->card->host->max_blk_count == 1) 1344 return RESULT_UNSUP_HOST; 1345 1346 size = PAGE_SIZE * 2; 1347 size = min(size, test->card->host->max_req_size); 1348 size = min(size, test->card->host->max_seg_size); 1349 size = min(size, test->card->host->max_blk_count * 512); 1350 1351 if (size < 1024) 1352 return RESULT_UNSUP_HOST; 1353 1354 sg_init_table(&sg, 1); 1355 sg_set_page(&sg, test->highmem, size, 0); 1356 1357 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1358 } 1359 1360 #else 1361 1362 static int mmc_test_no_highmem(struct mmc_test_card *test) 1363 { 1364 pr_info("%s: Highmem not configured - test skipped\n", 1365 mmc_hostname(test->card->host)); 1366 return 0; 1367 } 1368 1369 #endif /* CONFIG_HIGHMEM */ 1370 1371 /* 1372 * Map sz bytes so that it can be transferred. 1373 */ 1374 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1375 int max_scatter, int min_sg_len, bool nonblock) 1376 { 1377 struct mmc_test_area *t = &test->area; 1378 int err; 1379 unsigned int sg_len = 0; 1380 1381 t->blocks = sz >> 9; 1382 1383 if (max_scatter) { 1384 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1385 t->max_segs, t->max_seg_sz, 1386 &t->sg_len); 1387 } else { 1388 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1389 t->max_seg_sz, &t->sg_len, min_sg_len); 1390 } 1391 1392 if (err || !nonblock) 1393 goto err; 1394 1395 if (max_scatter) { 1396 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq, 1397 t->max_segs, t->max_seg_sz, 1398 &sg_len); 1399 } else { 1400 err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs, 1401 t->max_seg_sz, &sg_len, min_sg_len); 1402 } 1403 if (!err && sg_len != t->sg_len) 1404 err = -EINVAL; 1405 1406 err: 1407 if (err) 1408 pr_info("%s: Failed to map sg list\n", 1409 mmc_hostname(test->card->host)); 1410 return err; 1411 } 1412 1413 /* 1414 * Transfer bytes mapped by mmc_test_area_map(). 1415 */ 1416 static int mmc_test_area_transfer(struct mmc_test_card *test, 1417 unsigned int dev_addr, int write) 1418 { 1419 struct mmc_test_area *t = &test->area; 1420 1421 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, 1422 t->blocks, 512, write); 1423 } 1424 1425 /* 1426 * Map and transfer bytes for multiple transfers. 1427 */ 1428 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, 1429 unsigned int dev_addr, int write, 1430 int max_scatter, int timed, int count, 1431 bool nonblock, int min_sg_len) 1432 { 1433 struct timespec64 ts1, ts2; 1434 int ret = 0; 1435 int i; 1436 1437 /* 1438 * In the case of a maximally scattered transfer, the maximum transfer 1439 * size is further limited by using PAGE_SIZE segments. 1440 */ 1441 if (max_scatter) { 1442 struct mmc_test_area *t = &test->area; 1443 unsigned long max_tfr; 1444 1445 if (t->max_seg_sz >= PAGE_SIZE) 1446 max_tfr = t->max_segs * PAGE_SIZE; 1447 else 1448 max_tfr = t->max_segs * t->max_seg_sz; 1449 if (sz > max_tfr) 1450 sz = max_tfr; 1451 } 1452 1453 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock); 1454 if (ret) 1455 return ret; 1456 1457 if (timed) 1458 ktime_get_ts64(&ts1); 1459 if (nonblock) 1460 ret = mmc_test_nonblock_transfer(test, dev_addr, write, count); 1461 else 1462 for (i = 0; i < count && ret == 0; i++) { 1463 ret = mmc_test_area_transfer(test, dev_addr, write); 1464 dev_addr += sz >> 9; 1465 } 1466 1467 if (ret) 1468 return ret; 1469 1470 if (timed) 1471 ktime_get_ts64(&ts2); 1472 1473 if (timed) 1474 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); 1475 1476 return 0; 1477 } 1478 1479 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1480 unsigned int dev_addr, int write, int max_scatter, 1481 int timed) 1482 { 1483 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, 1484 timed, 1, false, 0); 1485 } 1486 1487 /* 1488 * Write the test area entirely. 1489 */ 1490 static int mmc_test_area_fill(struct mmc_test_card *test) 1491 { 1492 struct mmc_test_area *t = &test->area; 1493 1494 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); 1495 } 1496 1497 /* 1498 * Erase the test area entirely. 1499 */ 1500 static int mmc_test_area_erase(struct mmc_test_card *test) 1501 { 1502 struct mmc_test_area *t = &test->area; 1503 1504 if (!mmc_card_can_erase(test->card)) 1505 return 0; 1506 1507 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, 1508 MMC_ERASE_ARG); 1509 } 1510 1511 /* 1512 * Cleanup struct mmc_test_area. 1513 */ 1514 static int mmc_test_area_cleanup(struct mmc_test_card *test) 1515 { 1516 struct mmc_test_area *t = &test->area; 1517 1518 kfree(t->sg); 1519 kfree(t->sg_areq); 1520 mmc_test_free_mem(t->mem); 1521 1522 return 0; 1523 } 1524 1525 /* 1526 * Initialize an area for testing large transfers. The test area is set to the 1527 * middle of the card because cards may have different characteristics at the 1528 * front (for FAT file system optimization). Optionally, the area is erased 1529 * (if the card supports it) which may improve write performance. Optionally, 1530 * the area is filled with data for subsequent read tests. 1531 */ 1532 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1533 { 1534 struct mmc_test_area *t = &test->area; 1535 unsigned long min_sz = 64 * 1024, sz; 1536 int ret; 1537 1538 ret = mmc_test_set_blksize(test, 512); 1539 if (ret) 1540 return ret; 1541 1542 /* Make the test area size about 4MiB */ 1543 sz = (unsigned long)test->card->pref_erase << 9; 1544 t->max_sz = sz; 1545 while (t->max_sz < 4 * 1024 * 1024) 1546 t->max_sz += sz; 1547 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) 1548 t->max_sz -= sz; 1549 1550 t->max_segs = test->card->host->max_segs; 1551 t->max_seg_sz = test->card->host->max_seg_size; 1552 t->max_seg_sz -= t->max_seg_sz % 512; 1553 1554 t->max_tfr = t->max_sz; 1555 if (t->max_tfr >> 9 > test->card->host->max_blk_count) 1556 t->max_tfr = test->card->host->max_blk_count << 9; 1557 if (t->max_tfr > test->card->host->max_req_size) 1558 t->max_tfr = test->card->host->max_req_size; 1559 if (t->max_tfr / t->max_seg_sz > t->max_segs) 1560 t->max_tfr = t->max_segs * t->max_seg_sz; 1561 1562 /* 1563 * Try to allocate enough memory for a max. sized transfer. Less is OK 1564 * because the same memory can be mapped into the scatterlist more than 1565 * once. Also, take into account the limits imposed on scatterlist 1566 * segments by the host driver. 1567 */ 1568 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, 1569 t->max_seg_sz); 1570 if (!t->mem) 1571 return -ENOMEM; 1572 1573 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1574 if (!t->sg) { 1575 ret = -ENOMEM; 1576 goto out_free; 1577 } 1578 1579 t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq), 1580 GFP_KERNEL); 1581 if (!t->sg_areq) { 1582 ret = -ENOMEM; 1583 goto out_free; 1584 } 1585 1586 t->dev_addr = mmc_test_capacity(test->card) / 2; 1587 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); 1588 1589 if (erase) { 1590 ret = mmc_test_area_erase(test); 1591 if (ret) 1592 goto out_free; 1593 } 1594 1595 if (fill) { 1596 ret = mmc_test_area_fill(test); 1597 if (ret) 1598 goto out_free; 1599 } 1600 1601 return 0; 1602 1603 out_free: 1604 mmc_test_area_cleanup(test); 1605 return ret; 1606 } 1607 1608 /* 1609 * Prepare for large transfers. Do not erase the test area. 1610 */ 1611 static int mmc_test_area_prepare(struct mmc_test_card *test) 1612 { 1613 return mmc_test_area_init(test, 0, 0); 1614 } 1615 1616 /* 1617 * Prepare for large transfers. Do erase the test area. 1618 */ 1619 static int mmc_test_area_prepare_erase(struct mmc_test_card *test) 1620 { 1621 return mmc_test_area_init(test, 1, 0); 1622 } 1623 1624 /* 1625 * Prepare for large transfers. Erase and fill the test area. 1626 */ 1627 static int mmc_test_area_prepare_fill(struct mmc_test_card *test) 1628 { 1629 return mmc_test_area_init(test, 1, 1); 1630 } 1631 1632 /* 1633 * Test best-case performance. Best-case performance is expected from 1634 * a single large transfer. 1635 * 1636 * An additional option (max_scatter) allows the measurement of the same 1637 * transfer but with no contiguous pages in the scatter list. This tests 1638 * the efficiency of DMA to handle scattered pages. 1639 */ 1640 static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1641 int max_scatter) 1642 { 1643 struct mmc_test_area *t = &test->area; 1644 1645 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, 1646 max_scatter, 1); 1647 } 1648 1649 /* 1650 * Best-case read performance. 1651 */ 1652 static int mmc_test_best_read_performance(struct mmc_test_card *test) 1653 { 1654 return mmc_test_best_performance(test, 0, 0); 1655 } 1656 1657 /* 1658 * Best-case write performance. 1659 */ 1660 static int mmc_test_best_write_performance(struct mmc_test_card *test) 1661 { 1662 return mmc_test_best_performance(test, 1, 0); 1663 } 1664 1665 /* 1666 * Best-case read performance into scattered pages. 1667 */ 1668 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) 1669 { 1670 return mmc_test_best_performance(test, 0, 1); 1671 } 1672 1673 /* 1674 * Best-case write performance from scattered pages. 1675 */ 1676 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) 1677 { 1678 return mmc_test_best_performance(test, 1, 1); 1679 } 1680 1681 /* 1682 * Single read performance by transfer size. 1683 */ 1684 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1685 { 1686 struct mmc_test_area *t = &test->area; 1687 unsigned long sz; 1688 unsigned int dev_addr; 1689 int ret; 1690 1691 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1692 dev_addr = t->dev_addr + (sz >> 9); 1693 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1694 if (ret) 1695 return ret; 1696 } 1697 sz = t->max_tfr; 1698 dev_addr = t->dev_addr; 1699 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1700 } 1701 1702 /* 1703 * Single write performance by transfer size. 1704 */ 1705 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1706 { 1707 struct mmc_test_area *t = &test->area; 1708 unsigned long sz; 1709 unsigned int dev_addr; 1710 int ret; 1711 1712 ret = mmc_test_area_erase(test); 1713 if (ret) 1714 return ret; 1715 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1716 dev_addr = t->dev_addr + (sz >> 9); 1717 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1718 if (ret) 1719 return ret; 1720 } 1721 ret = mmc_test_area_erase(test); 1722 if (ret) 1723 return ret; 1724 sz = t->max_tfr; 1725 dev_addr = t->dev_addr; 1726 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1727 } 1728 1729 /* 1730 * Single trim performance by transfer size. 1731 */ 1732 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1733 { 1734 struct mmc_test_area *t = &test->area; 1735 unsigned long sz; 1736 unsigned int dev_addr; 1737 struct timespec64 ts1, ts2; 1738 int ret; 1739 1740 if (!mmc_card_can_trim(test->card)) 1741 return RESULT_UNSUP_CARD; 1742 1743 if (!mmc_card_can_erase(test->card)) 1744 return RESULT_UNSUP_HOST; 1745 1746 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1747 dev_addr = t->dev_addr + (sz >> 9); 1748 ktime_get_ts64(&ts1); 1749 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1750 if (ret) 1751 return ret; 1752 ktime_get_ts64(&ts2); 1753 mmc_test_print_rate(test, sz, &ts1, &ts2); 1754 } 1755 dev_addr = t->dev_addr; 1756 ktime_get_ts64(&ts1); 1757 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1758 if (ret) 1759 return ret; 1760 ktime_get_ts64(&ts2); 1761 mmc_test_print_rate(test, sz, &ts1, &ts2); 1762 return 0; 1763 } 1764 1765 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1766 { 1767 struct mmc_test_area *t = &test->area; 1768 unsigned int dev_addr, i, cnt; 1769 struct timespec64 ts1, ts2; 1770 int ret; 1771 1772 cnt = t->max_sz / sz; 1773 dev_addr = t->dev_addr; 1774 ktime_get_ts64(&ts1); 1775 for (i = 0; i < cnt; i++) { 1776 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1777 if (ret) 1778 return ret; 1779 dev_addr += (sz >> 9); 1780 } 1781 ktime_get_ts64(&ts2); 1782 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1783 return 0; 1784 } 1785 1786 /* 1787 * Consecutive read performance by transfer size. 1788 */ 1789 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1790 { 1791 struct mmc_test_area *t = &test->area; 1792 unsigned long sz; 1793 int ret; 1794 1795 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1796 ret = mmc_test_seq_read_perf(test, sz); 1797 if (ret) 1798 return ret; 1799 } 1800 sz = t->max_tfr; 1801 return mmc_test_seq_read_perf(test, sz); 1802 } 1803 1804 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1805 { 1806 struct mmc_test_area *t = &test->area; 1807 unsigned int dev_addr, i, cnt; 1808 struct timespec64 ts1, ts2; 1809 int ret; 1810 1811 ret = mmc_test_area_erase(test); 1812 if (ret) 1813 return ret; 1814 cnt = t->max_sz / sz; 1815 dev_addr = t->dev_addr; 1816 ktime_get_ts64(&ts1); 1817 for (i = 0; i < cnt; i++) { 1818 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1819 if (ret) 1820 return ret; 1821 dev_addr += (sz >> 9); 1822 } 1823 ktime_get_ts64(&ts2); 1824 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1825 return 0; 1826 } 1827 1828 /* 1829 * Consecutive write performance by transfer size. 1830 */ 1831 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1832 { 1833 struct mmc_test_area *t = &test->area; 1834 unsigned long sz; 1835 int ret; 1836 1837 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1838 ret = mmc_test_seq_write_perf(test, sz); 1839 if (ret) 1840 return ret; 1841 } 1842 sz = t->max_tfr; 1843 return mmc_test_seq_write_perf(test, sz); 1844 } 1845 1846 /* 1847 * Consecutive trim performance by transfer size. 1848 */ 1849 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1850 { 1851 struct mmc_test_area *t = &test->area; 1852 unsigned long sz; 1853 unsigned int dev_addr, i, cnt; 1854 struct timespec64 ts1, ts2; 1855 int ret; 1856 1857 if (!mmc_card_can_trim(test->card)) 1858 return RESULT_UNSUP_CARD; 1859 1860 if (!mmc_card_can_erase(test->card)) 1861 return RESULT_UNSUP_HOST; 1862 1863 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1864 ret = mmc_test_area_erase(test); 1865 if (ret) 1866 return ret; 1867 ret = mmc_test_area_fill(test); 1868 if (ret) 1869 return ret; 1870 cnt = t->max_sz / sz; 1871 dev_addr = t->dev_addr; 1872 ktime_get_ts64(&ts1); 1873 for (i = 0; i < cnt; i++) { 1874 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1875 MMC_TRIM_ARG); 1876 if (ret) 1877 return ret; 1878 dev_addr += (sz >> 9); 1879 } 1880 ktime_get_ts64(&ts2); 1881 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1882 } 1883 return 0; 1884 } 1885 1886 static unsigned int rnd_next = 1; 1887 1888 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1889 { 1890 uint64_t r; 1891 1892 rnd_next = rnd_next * 1103515245 + 12345; 1893 r = (rnd_next >> 16) & 0x7fff; 1894 return (r * rnd_cnt) >> 15; 1895 } 1896 1897 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1898 unsigned long sz, int secs, int force_retuning) 1899 { 1900 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1901 unsigned int ssz; 1902 struct timespec64 ts1, ts2, ts; 1903 int ret; 1904 1905 ssz = sz >> 9; 1906 1907 rnd_addr = mmc_test_capacity(test->card) / 4; 1908 range1 = rnd_addr / test->card->pref_erase; 1909 range2 = range1 / ssz; 1910 1911 ktime_get_ts64(&ts1); 1912 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1913 ktime_get_ts64(&ts2); 1914 ts = timespec64_sub(ts2, ts1); 1915 if (ts.tv_sec >= secs) 1916 break; 1917 ea = mmc_test_rnd_num(range1); 1918 if (ea == last_ea) 1919 ea -= 1; 1920 last_ea = ea; 1921 dev_addr = rnd_addr + test->card->pref_erase * ea + 1922 ssz * mmc_test_rnd_num(range2); 1923 if (force_retuning) 1924 mmc_retune_needed(test->card->host); 1925 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1926 if (ret) 1927 return ret; 1928 } 1929 if (print) 1930 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1931 return 0; 1932 } 1933 1934 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1935 { 1936 struct mmc_test_area *t = &test->area; 1937 unsigned int next; 1938 unsigned long sz; 1939 int ret; 1940 1941 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1942 /* 1943 * When writing, try to get more consistent results by running 1944 * the test twice with exactly the same I/O but outputting the 1945 * results only for the 2nd run. 1946 */ 1947 if (write) { 1948 next = rnd_next; 1949 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1950 if (ret) 1951 return ret; 1952 rnd_next = next; 1953 } 1954 ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1955 if (ret) 1956 return ret; 1957 } 1958 sz = t->max_tfr; 1959 if (write) { 1960 next = rnd_next; 1961 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1962 if (ret) 1963 return ret; 1964 rnd_next = next; 1965 } 1966 return mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1967 } 1968 1969 static int mmc_test_retuning(struct mmc_test_card *test) 1970 { 1971 if (!mmc_can_retune(test->card->host)) { 1972 pr_info("%s: No retuning - test skipped\n", 1973 mmc_hostname(test->card->host)); 1974 return RESULT_UNSUP_HOST; 1975 } 1976 1977 return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1); 1978 } 1979 1980 /* 1981 * Random read performance by transfer size. 1982 */ 1983 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1984 { 1985 return mmc_test_random_perf(test, 0); 1986 } 1987 1988 /* 1989 * Random write performance by transfer size. 1990 */ 1991 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1992 { 1993 return mmc_test_random_perf(test, 1); 1994 } 1995 1996 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1997 unsigned int tot_sz, int max_scatter) 1998 { 1999 struct mmc_test_area *t = &test->area; 2000 unsigned int dev_addr, i, cnt, sz, ssz; 2001 struct timespec64 ts1, ts2; 2002 int ret; 2003 2004 sz = t->max_tfr; 2005 2006 /* 2007 * In the case of a maximally scattered transfer, the maximum transfer 2008 * size is further limited by using PAGE_SIZE segments. 2009 */ 2010 if (max_scatter) { 2011 unsigned long max_tfr; 2012 2013 if (t->max_seg_sz >= PAGE_SIZE) 2014 max_tfr = t->max_segs * PAGE_SIZE; 2015 else 2016 max_tfr = t->max_segs * t->max_seg_sz; 2017 if (sz > max_tfr) 2018 sz = max_tfr; 2019 } 2020 2021 ssz = sz >> 9; 2022 dev_addr = mmc_test_capacity(test->card) / 4; 2023 if (tot_sz > dev_addr << 9) 2024 tot_sz = dev_addr << 9; 2025 cnt = tot_sz / sz; 2026 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2027 2028 ktime_get_ts64(&ts1); 2029 for (i = 0; i < cnt; i++) { 2030 ret = mmc_test_area_io(test, sz, dev_addr, write, 2031 max_scatter, 0); 2032 if (ret) 2033 return ret; 2034 dev_addr += ssz; 2035 } 2036 ktime_get_ts64(&ts2); 2037 2038 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2039 2040 return 0; 2041 } 2042 2043 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2044 { 2045 int ret, i; 2046 2047 for (i = 0; i < 10; i++) { 2048 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); 2049 if (ret) 2050 return ret; 2051 } 2052 for (i = 0; i < 5; i++) { 2053 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); 2054 if (ret) 2055 return ret; 2056 } 2057 for (i = 0; i < 3; i++) { 2058 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); 2059 if (ret) 2060 return ret; 2061 } 2062 2063 return ret; 2064 } 2065 2066 /* 2067 * Large sequential read performance. 2068 */ 2069 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2070 { 2071 return mmc_test_large_seq_perf(test, 0); 2072 } 2073 2074 /* 2075 * Large sequential write performance. 2076 */ 2077 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2078 { 2079 return mmc_test_large_seq_perf(test, 1); 2080 } 2081 2082 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2083 struct mmc_test_multiple_rw *tdata, 2084 unsigned int reqsize, unsigned int size, 2085 int min_sg_len) 2086 { 2087 unsigned int dev_addr; 2088 struct mmc_test_area *t = &test->area; 2089 int ret = 0; 2090 2091 /* Set up test area */ 2092 if (size > mmc_test_capacity(test->card) / 2 * 512) 2093 size = mmc_test_capacity(test->card) / 2 * 512; 2094 if (reqsize > t->max_tfr) 2095 reqsize = t->max_tfr; 2096 dev_addr = mmc_test_capacity(test->card) / 4; 2097 if ((dev_addr & 0xffff0000)) 2098 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2099 else 2100 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2101 if (!dev_addr) 2102 goto err; 2103 2104 if (reqsize > size) 2105 return 0; 2106 2107 /* prepare test area */ 2108 if (mmc_card_can_erase(test->card) && 2109 tdata->prepare & MMC_TEST_PREP_ERASE) { 2110 ret = mmc_erase(test->card, dev_addr, 2111 size / 512, test->card->erase_arg); 2112 if (ret) 2113 ret = mmc_erase(test->card, dev_addr, 2114 size / 512, MMC_ERASE_ARG); 2115 if (ret) 2116 goto err; 2117 } 2118 2119 /* Run test */ 2120 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2121 tdata->do_write, 0, 1, size / reqsize, 2122 tdata->do_nonblock_req, min_sg_len); 2123 if (ret) 2124 goto err; 2125 2126 return ret; 2127 err: 2128 pr_info("[%s] error\n", __func__); 2129 return ret; 2130 } 2131 2132 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2133 struct mmc_test_multiple_rw *rw) 2134 { 2135 int ret = 0; 2136 int i; 2137 void *pre_req = test->card->host->ops->pre_req; 2138 void *post_req = test->card->host->ops->post_req; 2139 2140 if (rw->do_nonblock_req && 2141 ((!pre_req && post_req) || (pre_req && !post_req))) { 2142 pr_info("error: only one of pre/post is defined\n"); 2143 return -EINVAL; 2144 } 2145 2146 for (i = 0 ; i < rw->len && ret == 0; i++) { 2147 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2148 if (ret) 2149 break; 2150 } 2151 return ret; 2152 } 2153 2154 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2155 struct mmc_test_multiple_rw *rw) 2156 { 2157 int ret = 0; 2158 int i; 2159 2160 for (i = 0 ; i < rw->len && ret == 0; i++) { 2161 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, 2162 rw->sg_len[i]); 2163 if (ret) 2164 break; 2165 } 2166 return ret; 2167 } 2168 2169 /* 2170 * Multiple blocking write 4k to 4 MB chunks 2171 */ 2172 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2173 { 2174 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2175 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2176 struct mmc_test_multiple_rw test_data = { 2177 .bs = bs, 2178 .size = TEST_AREA_MAX_SIZE, 2179 .len = ARRAY_SIZE(bs), 2180 .do_write = true, 2181 .do_nonblock_req = false, 2182 .prepare = MMC_TEST_PREP_ERASE, 2183 }; 2184 2185 return mmc_test_rw_multiple_size(test, &test_data); 2186 }; 2187 2188 /* 2189 * Multiple non-blocking write 4k to 4 MB chunks 2190 */ 2191 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2192 { 2193 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2194 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2195 struct mmc_test_multiple_rw test_data = { 2196 .bs = bs, 2197 .size = TEST_AREA_MAX_SIZE, 2198 .len = ARRAY_SIZE(bs), 2199 .do_write = true, 2200 .do_nonblock_req = true, 2201 .prepare = MMC_TEST_PREP_ERASE, 2202 }; 2203 2204 return mmc_test_rw_multiple_size(test, &test_data); 2205 } 2206 2207 /* 2208 * Multiple blocking read 4k to 4 MB chunks 2209 */ 2210 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2211 { 2212 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2213 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2214 struct mmc_test_multiple_rw test_data = { 2215 .bs = bs, 2216 .size = TEST_AREA_MAX_SIZE, 2217 .len = ARRAY_SIZE(bs), 2218 .do_write = false, 2219 .do_nonblock_req = false, 2220 .prepare = MMC_TEST_PREP_NONE, 2221 }; 2222 2223 return mmc_test_rw_multiple_size(test, &test_data); 2224 } 2225 2226 /* 2227 * Multiple non-blocking read 4k to 4 MB chunks 2228 */ 2229 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2230 { 2231 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2232 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2233 struct mmc_test_multiple_rw test_data = { 2234 .bs = bs, 2235 .size = TEST_AREA_MAX_SIZE, 2236 .len = ARRAY_SIZE(bs), 2237 .do_write = false, 2238 .do_nonblock_req = true, 2239 .prepare = MMC_TEST_PREP_NONE, 2240 }; 2241 2242 return mmc_test_rw_multiple_size(test, &test_data); 2243 } 2244 2245 /* 2246 * Multiple blocking write 1 to 512 sg elements 2247 */ 2248 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2249 { 2250 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2251 1 << 7, 1 << 8, 1 << 9}; 2252 struct mmc_test_multiple_rw test_data = { 2253 .sg_len = sg_len, 2254 .size = TEST_AREA_MAX_SIZE, 2255 .len = ARRAY_SIZE(sg_len), 2256 .do_write = true, 2257 .do_nonblock_req = false, 2258 .prepare = MMC_TEST_PREP_ERASE, 2259 }; 2260 2261 return mmc_test_rw_multiple_sg_len(test, &test_data); 2262 }; 2263 2264 /* 2265 * Multiple non-blocking write 1 to 512 sg elements 2266 */ 2267 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2268 { 2269 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2270 1 << 7, 1 << 8, 1 << 9}; 2271 struct mmc_test_multiple_rw test_data = { 2272 .sg_len = sg_len, 2273 .size = TEST_AREA_MAX_SIZE, 2274 .len = ARRAY_SIZE(sg_len), 2275 .do_write = true, 2276 .do_nonblock_req = true, 2277 .prepare = MMC_TEST_PREP_ERASE, 2278 }; 2279 2280 return mmc_test_rw_multiple_sg_len(test, &test_data); 2281 } 2282 2283 /* 2284 * Multiple blocking read 1 to 512 sg elements 2285 */ 2286 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2287 { 2288 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2289 1 << 7, 1 << 8, 1 << 9}; 2290 struct mmc_test_multiple_rw test_data = { 2291 .sg_len = sg_len, 2292 .size = TEST_AREA_MAX_SIZE, 2293 .len = ARRAY_SIZE(sg_len), 2294 .do_write = false, 2295 .do_nonblock_req = false, 2296 .prepare = MMC_TEST_PREP_NONE, 2297 }; 2298 2299 return mmc_test_rw_multiple_sg_len(test, &test_data); 2300 } 2301 2302 /* 2303 * Multiple non-blocking read 1 to 512 sg elements 2304 */ 2305 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2306 { 2307 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2308 1 << 7, 1 << 8, 1 << 9}; 2309 struct mmc_test_multiple_rw test_data = { 2310 .sg_len = sg_len, 2311 .size = TEST_AREA_MAX_SIZE, 2312 .len = ARRAY_SIZE(sg_len), 2313 .do_write = false, 2314 .do_nonblock_req = true, 2315 .prepare = MMC_TEST_PREP_NONE, 2316 }; 2317 2318 return mmc_test_rw_multiple_sg_len(test, &test_data); 2319 } 2320 2321 /* 2322 * eMMC hardware reset. 2323 */ 2324 static int mmc_test_reset(struct mmc_test_card *test) 2325 { 2326 struct mmc_card *card = test->card; 2327 int err; 2328 2329 err = mmc_hw_reset(card); 2330 if (!err) { 2331 /* 2332 * Reset will re-enable the card's command queue, but tests 2333 * expect it to be disabled. 2334 */ 2335 if (card->ext_csd.cmdq_en) 2336 mmc_cmdq_disable(card); 2337 return RESULT_OK; 2338 } else if (err == -EOPNOTSUPP) { 2339 return RESULT_UNSUP_HOST; 2340 } 2341 2342 return RESULT_FAIL; 2343 } 2344 2345 static int mmc_test_send_status(struct mmc_test_card *test, 2346 struct mmc_command *cmd) 2347 { 2348 memset(cmd, 0, sizeof(*cmd)); 2349 2350 cmd->opcode = MMC_SEND_STATUS; 2351 if (!mmc_host_is_spi(test->card->host)) 2352 cmd->arg = test->card->rca << 16; 2353 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2354 2355 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2356 } 2357 2358 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2359 unsigned int dev_addr, int use_sbc, 2360 int repeat_cmd, int write, int use_areq) 2361 { 2362 struct mmc_test_req *rq = mmc_test_req_alloc(); 2363 struct mmc_host *host = test->card->host; 2364 struct mmc_test_area *t = &test->area; 2365 struct mmc_request *mrq; 2366 unsigned long timeout; 2367 bool expired = false; 2368 int ret = 0, cmd_ret; 2369 u32 status = 0; 2370 int count = 0; 2371 2372 if (!rq) 2373 return -ENOMEM; 2374 2375 mrq = &rq->mrq; 2376 if (use_sbc) 2377 mrq->sbc = &rq->sbc; 2378 mrq->cap_cmd_during_tfr = true; 2379 2380 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2381 512, write); 2382 2383 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2384 ret = mmc_host_can_cmd23(host) ? 2385 RESULT_UNSUP_CARD : 2386 RESULT_UNSUP_HOST; 2387 goto out_free; 2388 } 2389 2390 /* Start ongoing data request */ 2391 if (use_areq) { 2392 ret = mmc_test_start_areq(test, mrq, NULL); 2393 if (ret) 2394 goto out_free; 2395 } else { 2396 mmc_wait_for_req(host, mrq); 2397 } 2398 2399 timeout = jiffies + msecs_to_jiffies(3000); 2400 do { 2401 count += 1; 2402 2403 /* Send status command while data transfer in progress */ 2404 cmd_ret = mmc_test_send_status(test, &rq->status); 2405 if (cmd_ret) 2406 break; 2407 2408 status = rq->status.resp[0]; 2409 if (status & R1_ERROR) { 2410 cmd_ret = -EIO; 2411 break; 2412 } 2413 2414 if (mmc_is_req_done(host, mrq)) 2415 break; 2416 2417 expired = time_after(jiffies, timeout); 2418 if (expired) { 2419 pr_info("%s: timeout waiting for Tran state status %#x\n", 2420 mmc_hostname(host), status); 2421 cmd_ret = -ETIMEDOUT; 2422 break; 2423 } 2424 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2425 2426 /* Wait for data request to complete */ 2427 if (use_areq) { 2428 ret = mmc_test_start_areq(test, NULL, mrq); 2429 } else { 2430 mmc_wait_for_req_done(test->card->host, mrq); 2431 } 2432 2433 /* 2434 * For cap_cmd_during_tfr request, upper layer must send stop if 2435 * required. 2436 */ 2437 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2438 if (ret) 2439 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2440 else 2441 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2442 } 2443 2444 if (ret) 2445 goto out_free; 2446 2447 if (cmd_ret) { 2448 pr_info("%s: Send Status failed: status %#x, error %d\n", 2449 mmc_hostname(test->card->host), status, cmd_ret); 2450 } 2451 2452 ret = mmc_test_check_result(test, mrq); 2453 if (ret) 2454 goto out_free; 2455 2456 ret = mmc_test_wait_busy(test); 2457 if (ret) 2458 goto out_free; 2459 2460 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) 2461 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2462 mmc_hostname(test->card->host), count, t->blocks); 2463 2464 if (cmd_ret) 2465 ret = cmd_ret; 2466 out_free: 2467 kfree(rq); 2468 2469 return ret; 2470 } 2471 2472 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2473 unsigned long sz, int use_sbc, int write, 2474 int use_areq) 2475 { 2476 struct mmc_test_area *t = &test->area; 2477 int ret; 2478 2479 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2480 return RESULT_UNSUP_HOST; 2481 2482 ret = mmc_test_area_map(test, sz, 0, 0, use_areq); 2483 if (ret) 2484 return ret; 2485 2486 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2487 use_areq); 2488 if (ret) 2489 return ret; 2490 2491 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2492 use_areq); 2493 } 2494 2495 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2496 int write, int use_areq) 2497 { 2498 struct mmc_test_area *t = &test->area; 2499 unsigned long sz; 2500 int ret; 2501 2502 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2503 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2504 use_areq); 2505 if (ret) 2506 return ret; 2507 } 2508 return 0; 2509 } 2510 2511 /* 2512 * Commands during read - no Set Block Count (CMD23). 2513 */ 2514 static int mmc_test_cmds_during_read(struct mmc_test_card *test) 2515 { 2516 return mmc_test_cmds_during_tfr(test, 0, 0, 0); 2517 } 2518 2519 /* 2520 * Commands during write - no Set Block Count (CMD23). 2521 */ 2522 static int mmc_test_cmds_during_write(struct mmc_test_card *test) 2523 { 2524 return mmc_test_cmds_during_tfr(test, 0, 1, 0); 2525 } 2526 2527 /* 2528 * Commands during read - use Set Block Count (CMD23). 2529 */ 2530 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test) 2531 { 2532 return mmc_test_cmds_during_tfr(test, 1, 0, 0); 2533 } 2534 2535 /* 2536 * Commands during write - use Set Block Count (CMD23). 2537 */ 2538 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test) 2539 { 2540 return mmc_test_cmds_during_tfr(test, 1, 1, 0); 2541 } 2542 2543 /* 2544 * Commands during non-blocking read - use Set Block Count (CMD23). 2545 */ 2546 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test) 2547 { 2548 return mmc_test_cmds_during_tfr(test, 1, 0, 1); 2549 } 2550 2551 /* 2552 * Commands during non-blocking write - use Set Block Count (CMD23). 2553 */ 2554 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test) 2555 { 2556 return mmc_test_cmds_during_tfr(test, 1, 1, 1); 2557 } 2558 2559 static const struct mmc_test_case mmc_test_cases[] = { 2560 { 2561 .name = "Basic write (no data verification)", 2562 .run = mmc_test_basic_write, 2563 }, 2564 2565 { 2566 .name = "Basic read (no data verification)", 2567 .run = mmc_test_basic_read, 2568 }, 2569 2570 { 2571 .name = "Basic write (with data verification)", 2572 .prepare = mmc_test_prepare_write, 2573 .run = mmc_test_verify_write, 2574 .cleanup = mmc_test_cleanup, 2575 }, 2576 2577 { 2578 .name = "Basic read (with data verification)", 2579 .prepare = mmc_test_prepare_read, 2580 .run = mmc_test_verify_read, 2581 .cleanup = mmc_test_cleanup, 2582 }, 2583 2584 { 2585 .name = "Multi-block write", 2586 .prepare = mmc_test_prepare_write, 2587 .run = mmc_test_multi_write, 2588 .cleanup = mmc_test_cleanup, 2589 }, 2590 2591 { 2592 .name = "Multi-block read", 2593 .prepare = mmc_test_prepare_read, 2594 .run = mmc_test_multi_read, 2595 .cleanup = mmc_test_cleanup, 2596 }, 2597 2598 { 2599 .name = "Power of two block writes", 2600 .prepare = mmc_test_prepare_write, 2601 .run = mmc_test_pow2_write, 2602 .cleanup = mmc_test_cleanup, 2603 }, 2604 2605 { 2606 .name = "Power of two block reads", 2607 .prepare = mmc_test_prepare_read, 2608 .run = mmc_test_pow2_read, 2609 .cleanup = mmc_test_cleanup, 2610 }, 2611 2612 { 2613 .name = "Weird sized block writes", 2614 .prepare = mmc_test_prepare_write, 2615 .run = mmc_test_weird_write, 2616 .cleanup = mmc_test_cleanup, 2617 }, 2618 2619 { 2620 .name = "Weird sized block reads", 2621 .prepare = mmc_test_prepare_read, 2622 .run = mmc_test_weird_read, 2623 .cleanup = mmc_test_cleanup, 2624 }, 2625 2626 { 2627 .name = "Badly aligned write", 2628 .prepare = mmc_test_prepare_write, 2629 .run = mmc_test_align_write, 2630 .cleanup = mmc_test_cleanup, 2631 }, 2632 2633 { 2634 .name = "Badly aligned read", 2635 .prepare = mmc_test_prepare_read, 2636 .run = mmc_test_align_read, 2637 .cleanup = mmc_test_cleanup, 2638 }, 2639 2640 { 2641 .name = "Badly aligned multi-block write", 2642 .prepare = mmc_test_prepare_write, 2643 .run = mmc_test_align_multi_write, 2644 .cleanup = mmc_test_cleanup, 2645 }, 2646 2647 { 2648 .name = "Badly aligned multi-block read", 2649 .prepare = mmc_test_prepare_read, 2650 .run = mmc_test_align_multi_read, 2651 .cleanup = mmc_test_cleanup, 2652 }, 2653 2654 { 2655 .name = "Proper xfer_size at write (start failure)", 2656 .run = mmc_test_xfersize_write, 2657 }, 2658 2659 { 2660 .name = "Proper xfer_size at read (start failure)", 2661 .run = mmc_test_xfersize_read, 2662 }, 2663 2664 { 2665 .name = "Proper xfer_size at write (midway failure)", 2666 .run = mmc_test_multi_xfersize_write, 2667 }, 2668 2669 { 2670 .name = "Proper xfer_size at read (midway failure)", 2671 .run = mmc_test_multi_xfersize_read, 2672 }, 2673 2674 #ifdef CONFIG_HIGHMEM 2675 2676 { 2677 .name = "Highmem write", 2678 .prepare = mmc_test_prepare_write, 2679 .run = mmc_test_write_high, 2680 .cleanup = mmc_test_cleanup, 2681 }, 2682 2683 { 2684 .name = "Highmem read", 2685 .prepare = mmc_test_prepare_read, 2686 .run = mmc_test_read_high, 2687 .cleanup = mmc_test_cleanup, 2688 }, 2689 2690 { 2691 .name = "Multi-block highmem write", 2692 .prepare = mmc_test_prepare_write, 2693 .run = mmc_test_multi_write_high, 2694 .cleanup = mmc_test_cleanup, 2695 }, 2696 2697 { 2698 .name = "Multi-block highmem read", 2699 .prepare = mmc_test_prepare_read, 2700 .run = mmc_test_multi_read_high, 2701 .cleanup = mmc_test_cleanup, 2702 }, 2703 2704 #else 2705 2706 { 2707 .name = "Highmem write", 2708 .run = mmc_test_no_highmem, 2709 }, 2710 2711 { 2712 .name = "Highmem read", 2713 .run = mmc_test_no_highmem, 2714 }, 2715 2716 { 2717 .name = "Multi-block highmem write", 2718 .run = mmc_test_no_highmem, 2719 }, 2720 2721 { 2722 .name = "Multi-block highmem read", 2723 .run = mmc_test_no_highmem, 2724 }, 2725 2726 #endif /* CONFIG_HIGHMEM */ 2727 2728 { 2729 .name = "Best-case read performance", 2730 .prepare = mmc_test_area_prepare_fill, 2731 .run = mmc_test_best_read_performance, 2732 .cleanup = mmc_test_area_cleanup, 2733 }, 2734 2735 { 2736 .name = "Best-case write performance", 2737 .prepare = mmc_test_area_prepare_erase, 2738 .run = mmc_test_best_write_performance, 2739 .cleanup = mmc_test_area_cleanup, 2740 }, 2741 2742 { 2743 .name = "Best-case read performance into scattered pages", 2744 .prepare = mmc_test_area_prepare_fill, 2745 .run = mmc_test_best_read_perf_max_scatter, 2746 .cleanup = mmc_test_area_cleanup, 2747 }, 2748 2749 { 2750 .name = "Best-case write performance from scattered pages", 2751 .prepare = mmc_test_area_prepare_erase, 2752 .run = mmc_test_best_write_perf_max_scatter, 2753 .cleanup = mmc_test_area_cleanup, 2754 }, 2755 2756 { 2757 .name = "Single read performance by transfer size", 2758 .prepare = mmc_test_area_prepare_fill, 2759 .run = mmc_test_profile_read_perf, 2760 .cleanup = mmc_test_area_cleanup, 2761 }, 2762 2763 { 2764 .name = "Single write performance by transfer size", 2765 .prepare = mmc_test_area_prepare, 2766 .run = mmc_test_profile_write_perf, 2767 .cleanup = mmc_test_area_cleanup, 2768 }, 2769 2770 { 2771 .name = "Single trim performance by transfer size", 2772 .prepare = mmc_test_area_prepare_fill, 2773 .run = mmc_test_profile_trim_perf, 2774 .cleanup = mmc_test_area_cleanup, 2775 }, 2776 2777 { 2778 .name = "Consecutive read performance by transfer size", 2779 .prepare = mmc_test_area_prepare_fill, 2780 .run = mmc_test_profile_seq_read_perf, 2781 .cleanup = mmc_test_area_cleanup, 2782 }, 2783 2784 { 2785 .name = "Consecutive write performance by transfer size", 2786 .prepare = mmc_test_area_prepare, 2787 .run = mmc_test_profile_seq_write_perf, 2788 .cleanup = mmc_test_area_cleanup, 2789 }, 2790 2791 { 2792 .name = "Consecutive trim performance by transfer size", 2793 .prepare = mmc_test_area_prepare, 2794 .run = mmc_test_profile_seq_trim_perf, 2795 .cleanup = mmc_test_area_cleanup, 2796 }, 2797 2798 { 2799 .name = "Random read performance by transfer size", 2800 .prepare = mmc_test_area_prepare, 2801 .run = mmc_test_random_read_perf, 2802 .cleanup = mmc_test_area_cleanup, 2803 }, 2804 2805 { 2806 .name = "Random write performance by transfer size", 2807 .prepare = mmc_test_area_prepare, 2808 .run = mmc_test_random_write_perf, 2809 .cleanup = mmc_test_area_cleanup, 2810 }, 2811 2812 { 2813 .name = "Large sequential read into scattered pages", 2814 .prepare = mmc_test_area_prepare, 2815 .run = mmc_test_large_seq_read_perf, 2816 .cleanup = mmc_test_area_cleanup, 2817 }, 2818 2819 { 2820 .name = "Large sequential write from scattered pages", 2821 .prepare = mmc_test_area_prepare, 2822 .run = mmc_test_large_seq_write_perf, 2823 .cleanup = mmc_test_area_cleanup, 2824 }, 2825 2826 { 2827 .name = "Write performance with blocking req 4k to 4MB", 2828 .prepare = mmc_test_area_prepare, 2829 .run = mmc_test_profile_mult_write_blocking_perf, 2830 .cleanup = mmc_test_area_cleanup, 2831 }, 2832 2833 { 2834 .name = "Write performance with non-blocking req 4k to 4MB", 2835 .prepare = mmc_test_area_prepare, 2836 .run = mmc_test_profile_mult_write_nonblock_perf, 2837 .cleanup = mmc_test_area_cleanup, 2838 }, 2839 2840 { 2841 .name = "Read performance with blocking req 4k to 4MB", 2842 .prepare = mmc_test_area_prepare, 2843 .run = mmc_test_profile_mult_read_blocking_perf, 2844 .cleanup = mmc_test_area_cleanup, 2845 }, 2846 2847 { 2848 .name = "Read performance with non-blocking req 4k to 4MB", 2849 .prepare = mmc_test_area_prepare, 2850 .run = mmc_test_profile_mult_read_nonblock_perf, 2851 .cleanup = mmc_test_area_cleanup, 2852 }, 2853 2854 { 2855 .name = "Write performance blocking req 1 to 512 sg elems", 2856 .prepare = mmc_test_area_prepare, 2857 .run = mmc_test_profile_sglen_wr_blocking_perf, 2858 .cleanup = mmc_test_area_cleanup, 2859 }, 2860 2861 { 2862 .name = "Write performance non-blocking req 1 to 512 sg elems", 2863 .prepare = mmc_test_area_prepare, 2864 .run = mmc_test_profile_sglen_wr_nonblock_perf, 2865 .cleanup = mmc_test_area_cleanup, 2866 }, 2867 2868 { 2869 .name = "Read performance blocking req 1 to 512 sg elems", 2870 .prepare = mmc_test_area_prepare, 2871 .run = mmc_test_profile_sglen_r_blocking_perf, 2872 .cleanup = mmc_test_area_cleanup, 2873 }, 2874 2875 { 2876 .name = "Read performance non-blocking req 1 to 512 sg elems", 2877 .prepare = mmc_test_area_prepare, 2878 .run = mmc_test_profile_sglen_r_nonblock_perf, 2879 .cleanup = mmc_test_area_cleanup, 2880 }, 2881 2882 { 2883 .name = "Reset test", 2884 .run = mmc_test_reset, 2885 }, 2886 2887 { 2888 .name = "Commands during read - no Set Block Count (CMD23)", 2889 .prepare = mmc_test_area_prepare, 2890 .run = mmc_test_cmds_during_read, 2891 .cleanup = mmc_test_area_cleanup, 2892 }, 2893 2894 { 2895 .name = "Commands during write - no Set Block Count (CMD23)", 2896 .prepare = mmc_test_area_prepare, 2897 .run = mmc_test_cmds_during_write, 2898 .cleanup = mmc_test_area_cleanup, 2899 }, 2900 2901 { 2902 .name = "Commands during read - use Set Block Count (CMD23)", 2903 .prepare = mmc_test_area_prepare, 2904 .run = mmc_test_cmds_during_read_cmd23, 2905 .cleanup = mmc_test_area_cleanup, 2906 }, 2907 2908 { 2909 .name = "Commands during write - use Set Block Count (CMD23)", 2910 .prepare = mmc_test_area_prepare, 2911 .run = mmc_test_cmds_during_write_cmd23, 2912 .cleanup = mmc_test_area_cleanup, 2913 }, 2914 2915 { 2916 .name = "Commands during non-blocking read - use Set Block Count (CMD23)", 2917 .prepare = mmc_test_area_prepare, 2918 .run = mmc_test_cmds_during_read_cmd23_nonblock, 2919 .cleanup = mmc_test_area_cleanup, 2920 }, 2921 2922 { 2923 .name = "Commands during non-blocking write - use Set Block Count (CMD23)", 2924 .prepare = mmc_test_area_prepare, 2925 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2926 .cleanup = mmc_test_area_cleanup, 2927 }, 2928 2929 { 2930 .name = "Re-tuning reliability", 2931 .prepare = mmc_test_area_prepare, 2932 .run = mmc_test_retuning, 2933 .cleanup = mmc_test_area_cleanup, 2934 }, 2935 2936 }; 2937 2938 static DEFINE_MUTEX(mmc_test_lock); 2939 2940 static LIST_HEAD(mmc_test_result); 2941 2942 static void mmc_test_run(struct mmc_test_card *test, int testcase) 2943 { 2944 int i, ret; 2945 2946 pr_info("%s: Starting tests of card %s...\n", 2947 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2948 2949 mmc_claim_host(test->card->host); 2950 2951 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) { 2952 struct mmc_test_general_result *gr; 2953 2954 if (testcase && ((i + 1) != testcase)) 2955 continue; 2956 2957 pr_info("%s: Test case %d. %s...\n", 2958 mmc_hostname(test->card->host), i + 1, 2959 mmc_test_cases[i].name); 2960 2961 if (mmc_test_cases[i].prepare) { 2962 ret = mmc_test_cases[i].prepare(test); 2963 if (ret) { 2964 pr_info("%s: Result: Prepare stage failed! (%d)\n", 2965 mmc_hostname(test->card->host), 2966 ret); 2967 continue; 2968 } 2969 } 2970 2971 gr = kzalloc(sizeof(*gr), GFP_KERNEL); 2972 if (gr) { 2973 INIT_LIST_HEAD(&gr->tr_lst); 2974 2975 /* Assign data what we know already */ 2976 gr->card = test->card; 2977 gr->testcase = i; 2978 2979 /* Append container to global one */ 2980 list_add_tail(&gr->link, &mmc_test_result); 2981 2982 /* 2983 * Save the pointer to created container in our private 2984 * structure. 2985 */ 2986 test->gr = gr; 2987 } 2988 2989 ret = mmc_test_cases[i].run(test); 2990 switch (ret) { 2991 case RESULT_OK: 2992 pr_info("%s: Result: OK\n", 2993 mmc_hostname(test->card->host)); 2994 break; 2995 case RESULT_FAIL: 2996 pr_info("%s: Result: FAILED\n", 2997 mmc_hostname(test->card->host)); 2998 break; 2999 case RESULT_UNSUP_HOST: 3000 pr_info("%s: Result: UNSUPPORTED (by host)\n", 3001 mmc_hostname(test->card->host)); 3002 break; 3003 case RESULT_UNSUP_CARD: 3004 pr_info("%s: Result: UNSUPPORTED (by card)\n", 3005 mmc_hostname(test->card->host)); 3006 break; 3007 default: 3008 pr_info("%s: Result: ERROR (%d)\n", 3009 mmc_hostname(test->card->host), ret); 3010 } 3011 3012 /* Save the result */ 3013 if (gr) 3014 gr->result = ret; 3015 3016 if (mmc_test_cases[i].cleanup) { 3017 ret = mmc_test_cases[i].cleanup(test); 3018 if (ret) { 3019 pr_info("%s: Warning: Cleanup stage failed! (%d)\n", 3020 mmc_hostname(test->card->host), 3021 ret); 3022 } 3023 } 3024 } 3025 3026 mmc_release_host(test->card->host); 3027 3028 pr_info("%s: Tests completed.\n", 3029 mmc_hostname(test->card->host)); 3030 } 3031 3032 static void mmc_test_free_result(struct mmc_card *card) 3033 { 3034 struct mmc_test_general_result *gr, *grs; 3035 3036 mutex_lock(&mmc_test_lock); 3037 3038 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3039 struct mmc_test_transfer_result *tr, *trs; 3040 3041 if (card && gr->card != card) 3042 continue; 3043 3044 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3045 list_del(&tr->link); 3046 kfree(tr); 3047 } 3048 3049 list_del(&gr->link); 3050 kfree(gr); 3051 } 3052 3053 mutex_unlock(&mmc_test_lock); 3054 } 3055 3056 static LIST_HEAD(mmc_test_file_test); 3057 3058 static int mtf_test_show(struct seq_file *sf, void *data) 3059 { 3060 struct mmc_card *card = sf->private; 3061 struct mmc_test_general_result *gr; 3062 3063 mutex_lock(&mmc_test_lock); 3064 3065 list_for_each_entry(gr, &mmc_test_result, link) { 3066 struct mmc_test_transfer_result *tr; 3067 3068 if (gr->card != card) 3069 continue; 3070 3071 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3072 3073 list_for_each_entry(tr, &gr->tr_lst, link) { 3074 seq_printf(sf, "%u %d %ptSp %u %u.%02u\n", 3075 tr->count, tr->sectors, &tr->ts, tr->rate, 3076 tr->iops / 100, tr->iops % 100); 3077 } 3078 } 3079 3080 mutex_unlock(&mmc_test_lock); 3081 3082 return 0; 3083 } 3084 3085 static int mtf_test_open(struct inode *inode, struct file *file) 3086 { 3087 return single_open(file, mtf_test_show, inode->i_private); 3088 } 3089 3090 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3091 size_t count, loff_t *pos) 3092 { 3093 struct seq_file *sf = file->private_data; 3094 struct mmc_card *card = sf->private; 3095 struct mmc_test_card *test; 3096 long testcase; 3097 int ret; 3098 3099 ret = kstrtol_from_user(buf, count, 10, &testcase); 3100 if (ret) 3101 return ret; 3102 3103 test = kzalloc(sizeof(*test), GFP_KERNEL); 3104 if (!test) 3105 return -ENOMEM; 3106 3107 /* 3108 * Remove all test cases associated with given card. Thus we have only 3109 * actual data of the last run. 3110 */ 3111 mmc_test_free_result(card); 3112 3113 test->card = card; 3114 3115 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3116 #ifdef CONFIG_HIGHMEM 3117 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3118 if (!test->highmem) { 3119 count = -ENOMEM; 3120 goto free_test_buffer; 3121 } 3122 #endif 3123 3124 if (test->buffer) { 3125 mutex_lock(&mmc_test_lock); 3126 mmc_test_run(test, testcase); 3127 mutex_unlock(&mmc_test_lock); 3128 } 3129 3130 #ifdef CONFIG_HIGHMEM 3131 __free_pages(test->highmem, BUFFER_ORDER); 3132 free_test_buffer: 3133 #endif 3134 kfree(test->buffer); 3135 kfree(test); 3136 3137 return count; 3138 } 3139 3140 static const struct file_operations mmc_test_fops_test = { 3141 .open = mtf_test_open, 3142 .read = seq_read, 3143 .write = mtf_test_write, 3144 .llseek = seq_lseek, 3145 .release = single_release, 3146 }; 3147 3148 static int mtf_testlist_show(struct seq_file *sf, void *data) 3149 { 3150 int i; 3151 3152 mutex_lock(&mmc_test_lock); 3153 3154 seq_puts(sf, "0:\tRun all tests\n"); 3155 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) 3156 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name); 3157 3158 mutex_unlock(&mmc_test_lock); 3159 3160 return 0; 3161 } 3162 3163 DEFINE_SHOW_ATTRIBUTE(mtf_testlist); 3164 3165 static void mmc_test_free_dbgfs_file(struct mmc_card *card) 3166 { 3167 struct mmc_test_dbgfs_file *df, *dfs; 3168 3169 mutex_lock(&mmc_test_lock); 3170 3171 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { 3172 if (card && df->card != card) 3173 continue; 3174 debugfs_remove(df->file); 3175 list_del(&df->link); 3176 kfree(df); 3177 } 3178 3179 mutex_unlock(&mmc_test_lock); 3180 } 3181 3182 static int __mmc_test_register_dbgfs_file(struct mmc_card *card, 3183 const char *name, umode_t mode, const struct file_operations *fops) 3184 { 3185 struct dentry *file = NULL; 3186 struct mmc_test_dbgfs_file *df; 3187 3188 if (card->debugfs_root) 3189 file = debugfs_create_file(name, mode, card->debugfs_root, 3190 card, fops); 3191 3192 df = kmalloc(sizeof(*df), GFP_KERNEL); 3193 if (!df) { 3194 debugfs_remove(file); 3195 return -ENOMEM; 3196 } 3197 3198 df->card = card; 3199 df->file = file; 3200 3201 list_add(&df->link, &mmc_test_file_test); 3202 return 0; 3203 } 3204 3205 static int mmc_test_register_dbgfs_file(struct mmc_card *card) 3206 { 3207 int ret; 3208 3209 mutex_lock(&mmc_test_lock); 3210 3211 ret = __mmc_test_register_dbgfs_file(card, "test", 0644, 3212 &mmc_test_fops_test); 3213 if (ret) 3214 goto err; 3215 3216 ret = __mmc_test_register_dbgfs_file(card, "testlist", 0444, 3217 &mtf_testlist_fops); 3218 if (ret) 3219 goto err; 3220 3221 err: 3222 mutex_unlock(&mmc_test_lock); 3223 3224 return ret; 3225 } 3226 3227 static int mmc_test_probe(struct mmc_card *card) 3228 { 3229 int ret; 3230 3231 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3232 return -ENODEV; 3233 3234 if (mmc_card_ult_capacity(card)) { 3235 pr_info("%s: mmc-test currently UNSUPPORTED for SDUC\n", 3236 mmc_hostname(card->host)); 3237 return -EOPNOTSUPP; 3238 } 3239 3240 ret = mmc_test_register_dbgfs_file(card); 3241 if (ret) 3242 return ret; 3243 3244 if (card->ext_csd.cmdq_en) { 3245 mmc_claim_host(card->host); 3246 ret = mmc_cmdq_disable(card); 3247 mmc_release_host(card->host); 3248 if (ret) 3249 return ret; 3250 } 3251 3252 dev_info(&card->dev, "Card claimed for testing.\n"); 3253 3254 return 0; 3255 } 3256 3257 static void mmc_test_remove(struct mmc_card *card) 3258 { 3259 if (card->reenable_cmdq) { 3260 mmc_claim_host(card->host); 3261 mmc_cmdq_enable(card); 3262 mmc_release_host(card->host); 3263 } 3264 mmc_test_free_result(card); 3265 mmc_test_free_dbgfs_file(card); 3266 } 3267 3268 static struct mmc_driver mmc_driver = { 3269 .drv = { 3270 .name = "mmc_test", 3271 }, 3272 .probe = mmc_test_probe, 3273 .remove = mmc_test_remove, 3274 }; 3275 3276 static int __init mmc_test_init(void) 3277 { 3278 return mmc_register_driver(&mmc_driver); 3279 } 3280 3281 static void __exit mmc_test_exit(void) 3282 { 3283 /* Clear stalled data if card is still plugged */ 3284 mmc_test_free_result(NULL); 3285 mmc_test_free_dbgfs_file(NULL); 3286 3287 mmc_unregister_driver(&mmc_driver); 3288 } 3289 3290 module_init(mmc_test_init); 3291 module_exit(mmc_test_exit); 3292 3293 MODULE_LICENSE("GPL"); 3294 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); 3295 MODULE_AUTHOR("Pierre Ossman"); 3296