// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

/*
 * Test result codes.  These are positive so they cannot collide with the
 * negative errno values that the helpers below also propagate.
 */
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)	/* 4 pages */

/* Unaligned-buffer tests run at offsets 1..TEST_ALIGN_END-1 */
#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE SZ_128M

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @cnt: number of allocations
 * @arr: array of allocations
 */
struct mmc_test_mem {
	unsigned int cnt;
	struct mmc_test_pages arr[] __counted_by(cnt);
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
	struct scatterlist *sg_areq;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer (elapsed time, as computed by the caller)
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 * @buffer: transfer buffer
 */
struct mmc_test_card {
	struct mmc_card *card;

	u8 scratch[BUFFER_SIZE];
#ifdef CONFIG_HIGHMEM
	struct page *highmem;
#endif
	struct mmc_test_area area;
	struct mmc_test_general_result *gr;

	/* flexible array member: sized by the allocation site */
	u8 buffer[];
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

/* Block sizes (bytes) exercised by the multiple read/write tests */
static unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			    1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};

/* Scatterlist lengths exercised by the multiple read/write tests */
static unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				1 << 7, 1 << 8, 1 << 9};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in (or drop) the SET_BLOCK_COUNT (CMD23) part of the request.
 * CMD23 is only issued when both host and card support it and the
 * command is a multi-block read/write; otherwise mrq->sbc is cleared.
 */
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_can_cmd23(card->host) ||
	    !mmc_card_can_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    mmc_card_blk_no_cmd23(card)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	/* Byte-addressed cards take a byte offset, not a sector number */
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block transfers need no STOP_TRANSMISSION */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ?
		MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}

/* True while the card reports not-ready-for-data or programming state */
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state.
 * Polls CMD13 (SEND_STATUS) until the card leaves the programming state.
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		/*
		 * A host that claims MMC_CAP_WAIT_WHILE_BUSY should never
		 * let us observe the busy state here - warn once if it does.
		 */
		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

/* Free all page allocations tracked by @mem, then @mem itself */
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	/* Do not consume more than 1/16th of the free lowmem pages */
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc_flex(*mem, arr, max_segs);
	if (!mem)
		return NULL;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		/* Back off the allocation order until alloc_pages() succeeds */
		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist. Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Shrink segments so at least min_sg_len are used */
			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous. Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		/* Walk allocations and their pages in reverse order */
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip a page that directly follows the previous one */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around: the same memory may be mapped repeatedly */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= NSEC_PER_SEC;

	/*
	 * Scale both values down together until the divisor fits in
	 * 32 bits, since do_div() takes a 32-bit divisor.
	 */
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	/* Only record when a testcase result list is active */
	if (!test->gr)
		return;

	tr = kmalloc_obj(*tr);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> SECTOR_SHIFT;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
			 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> SECTOR_SHIFT;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""), &ts,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Writes use a constant fill; reads use a counting pattern */
	if (write)
		memset(test->buffer, val, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0xDF);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1, 0);
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		/* Issue a single-block command for a multi-block transfer */
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		/* Issue a non-data command for a single-block transfer */
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	/* -EINVAL from the host means the request form is unsupported */
	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	/* A broken transfer is expected to time out on the data phase */
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc_obj(*rq);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

/*
 * Start @mrq after completing and checking @prev_mrq, so that request
 * preparation overlaps with the previous transfer.  Either argument
 * may be NULL (first and last iteration respectively).
 */
static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}

static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      unsigned int dev_addr, int write,
				      int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
	struct mmc_test_area *t = &test->area;
	struct scatterlist *sg = t->sg;
	struct scatterlist *sg_areq = t->sg_areq;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	/* Double-buffer: alternate the two requests and the two sg lists */
	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		swap(sg, sg_areq);
		dev_addr += t->blocks;
	}

	/* Wait for and check the final in-flight request */
	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where
 * the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;

	if (write) {
		/* Counting pattern to be written and read back below */
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		/*
		 * On an exact sector multiple, read one extra sector so the
		 * 0xDF fill (from mmc_test_prepare_write()) past the written
		 * data can also be verified.
		 */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* The remainder of the last sector must still hold 0xDF */
		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Clamp to what the host controller can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Clamp to what the host controller can do in one request */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	/* Power-of-two block sizes: 1, 2, 4, ..., 256 bytes */
	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	/* Power-of-two block sizes: 1, 2, 4, ..., 256 bytes */
	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	/* Odd, non-power-of-two block sizes: 3, 10, 17, ... */
	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	/* Odd, non-power-of-two block sizes: 3, 10, 17, ... */
	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Misalign the buffer by 1..TEST_ALIGN_END-1 bytes */
	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Misalign the buffer by 1..TEST_ALIGN_END-1 bytes */
	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

/*
 * Single-block write from a highmem page.
 */
static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

/*
 * Single-block read into a highmem page.
 */
static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

/*
 * Multi-block write from a highmem page.
 */
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Two pages, clamped to every relevant host controller limit. */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

/*
 * Multi-block read into a highmem page.
 */
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Two pages, clamped to every relevant host controller limit. */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

/*
 * Stand-in used when the kernel is built without CONFIG_HIGHMEM.
 */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len, bool nonblock)
{
	struct mmc_test_area *t = &test->area;
	int err;
	unsigned int sg_len = 0;

	t->blocks = sz >> SECTOR_SHIFT;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}

	/* Only the non-blocking path needs the second scatterlist mapped. */
	if (err || !nonblock)
		goto err;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
						  t->max_segs, t->max_seg_sz,
						  &sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
				      t->max_seg_sz, &sg_len, min_sg_len);
	}
	/* Both mappings must come out with the same number of segments. */
	if (!err && sg_len != t->sg_len)
		err = -EINVAL;

err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> SECTOR_SHIFT;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

/*
 * Convenience wrapper: one blocking transfer with default sg length.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	/* Erase is optional for this test area - not an error if unsupported */
	if (!mmc_card_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> SECTOR_SHIFT,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	kfree(t->sg_areq);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization). Optionally, the area is erased
 * (if the card supports it) which may improve write performance. Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = SZ_64K, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << SECTOR_SHIFT;
	t->max_sz = sz;
	while (t->max_sz < SZ_4M)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;	/* whole 512-byte blocks only */

	/* Clamp the maximum transfer to every host controller limit. */
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> SECTOR_SHIFT > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << SECTOR_SHIFT;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	/* NOTE(review): kmalloc_objs() - confirm this helper exists in this
	 * tree (kmalloc_array() is the more common idiom). */
	t->sg = kmalloc_objs(*t->sg, t->max_segs);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->sg_areq = kmalloc_objs(*t->sg_areq, t->max_segs);
	if (!t->sg_areq) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* Middle of the card, aligned to a multiple of the area size. */
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> SECTOR_SHIFT);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance. Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list. This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> SECTOR_SHIFT);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
1704 */ 1705 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1706 { 1707 struct mmc_test_area *t = &test->area; 1708 unsigned long sz; 1709 unsigned int dev_addr; 1710 int ret; 1711 1712 ret = mmc_test_area_erase(test); 1713 if (ret) 1714 return ret; 1715 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1716 dev_addr = t->dev_addr + (sz >> SECTOR_SHIFT); 1717 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1718 if (ret) 1719 return ret; 1720 } 1721 ret = mmc_test_area_erase(test); 1722 if (ret) 1723 return ret; 1724 sz = t->max_tfr; 1725 dev_addr = t->dev_addr; 1726 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1727 } 1728 1729 /* 1730 * Single trim performance by transfer size. 1731 */ 1732 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1733 { 1734 struct mmc_test_area *t = &test->area; 1735 unsigned long sz; 1736 unsigned int dev_addr; 1737 struct timespec64 ts1, ts2; 1738 int ret; 1739 1740 if (!mmc_card_can_trim(test->card)) 1741 return RESULT_UNSUP_CARD; 1742 1743 if (!mmc_card_can_erase(test->card)) 1744 return RESULT_UNSUP_HOST; 1745 1746 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1747 dev_addr = t->dev_addr + (sz >> SECTOR_SHIFT); 1748 ktime_get_ts64(&ts1); 1749 ret = mmc_erase(test->card, dev_addr, sz >> SECTOR_SHIFT, MMC_TRIM_ARG); 1750 if (ret) 1751 return ret; 1752 ktime_get_ts64(&ts2); 1753 mmc_test_print_rate(test, sz, &ts1, &ts2); 1754 } 1755 dev_addr = t->dev_addr; 1756 ktime_get_ts64(&ts1); 1757 ret = mmc_erase(test->card, dev_addr, sz >> SECTOR_SHIFT, MMC_TRIM_ARG); 1758 if (ret) 1759 return ret; 1760 ktime_get_ts64(&ts2); 1761 mmc_test_print_rate(test, sz, &ts1, &ts2); 1762 return 0; 1763 } 1764 1765 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1766 { 1767 struct mmc_test_area *t = &test->area; 1768 unsigned int dev_addr, i, cnt; 1769 struct timespec64 ts1, ts2; 1770 int ret; 1771 1772 cnt = t->max_sz / sz; 1773 dev_addr = t->dev_addr; 1774 ktime_get_ts64(&ts1); 1775 
for (i = 0; i < cnt; i++) { 1776 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1777 if (ret) 1778 return ret; 1779 dev_addr += (sz >> SECTOR_SHIFT); 1780 } 1781 ktime_get_ts64(&ts2); 1782 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1783 return 0; 1784 } 1785 1786 /* 1787 * Consecutive read performance by transfer size. 1788 */ 1789 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1790 { 1791 struct mmc_test_area *t = &test->area; 1792 unsigned long sz; 1793 int ret; 1794 1795 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1796 ret = mmc_test_seq_read_perf(test, sz); 1797 if (ret) 1798 return ret; 1799 } 1800 sz = t->max_tfr; 1801 return mmc_test_seq_read_perf(test, sz); 1802 } 1803 1804 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1805 { 1806 struct mmc_test_area *t = &test->area; 1807 unsigned int dev_addr, i, cnt; 1808 struct timespec64 ts1, ts2; 1809 int ret; 1810 1811 ret = mmc_test_area_erase(test); 1812 if (ret) 1813 return ret; 1814 cnt = t->max_sz / sz; 1815 dev_addr = t->dev_addr; 1816 ktime_get_ts64(&ts1); 1817 for (i = 0; i < cnt; i++) { 1818 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1819 if (ret) 1820 return ret; 1821 dev_addr += (sz >> SECTOR_SHIFT); 1822 } 1823 ktime_get_ts64(&ts2); 1824 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1825 return 0; 1826 } 1827 1828 /* 1829 * Consecutive write performance by transfer size. 1830 */ 1831 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1832 { 1833 struct mmc_test_area *t = &test->area; 1834 unsigned long sz; 1835 int ret; 1836 1837 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1838 ret = mmc_test_seq_write_perf(test, sz); 1839 if (ret) 1840 return ret; 1841 } 1842 sz = t->max_tfr; 1843 return mmc_test_seq_write_perf(test, sz); 1844 } 1845 1846 /* 1847 * Consecutive trim performance by transfer size. 
1848 */ 1849 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1850 { 1851 struct mmc_test_area *t = &test->area; 1852 unsigned long sz; 1853 unsigned int dev_addr, i, cnt; 1854 struct timespec64 ts1, ts2; 1855 int ret; 1856 1857 if (!mmc_card_can_trim(test->card)) 1858 return RESULT_UNSUP_CARD; 1859 1860 if (!mmc_card_can_erase(test->card)) 1861 return RESULT_UNSUP_HOST; 1862 1863 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1864 ret = mmc_test_area_erase(test); 1865 if (ret) 1866 return ret; 1867 ret = mmc_test_area_fill(test); 1868 if (ret) 1869 return ret; 1870 cnt = t->max_sz / sz; 1871 dev_addr = t->dev_addr; 1872 ktime_get_ts64(&ts1); 1873 for (i = 0; i < cnt; i++) { 1874 ret = mmc_erase(test->card, dev_addr, sz >> SECTOR_SHIFT, 1875 MMC_TRIM_ARG); 1876 if (ret) 1877 return ret; 1878 dev_addr += (sz >> SECTOR_SHIFT); 1879 } 1880 ktime_get_ts64(&ts2); 1881 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1882 } 1883 return 0; 1884 } 1885 1886 static unsigned int rnd_next = 1; 1887 1888 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1889 { 1890 uint64_t r; 1891 1892 rnd_next = rnd_next * 1103515245 + 12345; 1893 r = (rnd_next >> 16) & 0x7fff; 1894 return (r * rnd_cnt) >> 15; 1895 } 1896 1897 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1898 unsigned long sz, int secs, int force_retuning) 1899 { 1900 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1901 unsigned int ssz; 1902 struct timespec64 ts1, ts2, ts; 1903 int ret; 1904 1905 ssz = sz >> SECTOR_SHIFT; 1906 1907 rnd_addr = mmc_test_capacity(test->card) / 4; 1908 range1 = rnd_addr / test->card->pref_erase; 1909 range2 = range1 / ssz; 1910 1911 ktime_get_ts64(&ts1); 1912 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1913 ktime_get_ts64(&ts2); 1914 ts = timespec64_sub(ts2, ts1); 1915 if (ts.tv_sec >= secs) 1916 break; 1917 ea = mmc_test_rnd_num(range1); 1918 if (ea == last_ea) 1919 ea -= 1; 1920 last_ea = ea; 1921 dev_addr = 
rnd_addr + test->card->pref_erase * ea + 1922 ssz * mmc_test_rnd_num(range2); 1923 if (force_retuning) 1924 mmc_retune_needed(test->card->host); 1925 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1926 if (ret) 1927 return ret; 1928 } 1929 if (print) 1930 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1931 return 0; 1932 } 1933 1934 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1935 { 1936 struct mmc_test_area *t = &test->area; 1937 unsigned int next; 1938 unsigned long sz; 1939 int ret; 1940 1941 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1942 /* 1943 * When writing, try to get more consistent results by running 1944 * the test twice with exactly the same I/O but outputting the 1945 * results only for the 2nd run. 1946 */ 1947 if (write) { 1948 next = rnd_next; 1949 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1950 if (ret) 1951 return ret; 1952 rnd_next = next; 1953 } 1954 ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1955 if (ret) 1956 return ret; 1957 } 1958 sz = t->max_tfr; 1959 if (write) { 1960 next = rnd_next; 1961 ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0); 1962 if (ret) 1963 return ret; 1964 rnd_next = next; 1965 } 1966 return mmc_test_rnd_perf(test, write, 1, sz, 10, 0); 1967 } 1968 1969 static int mmc_test_retuning(struct mmc_test_card *test) 1970 { 1971 if (!mmc_can_retune(test->card->host)) { 1972 pr_info("%s: No retuning - test skipped\n", 1973 mmc_hostname(test->card->host)); 1974 return RESULT_UNSUP_HOST; 1975 } 1976 1977 return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1); 1978 } 1979 1980 /* 1981 * Random read performance by transfer size. 1982 */ 1983 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1984 { 1985 return mmc_test_random_perf(test, 0); 1986 } 1987 1988 /* 1989 * Random write performance by transfer size. 
1990 */ 1991 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1992 { 1993 return mmc_test_random_perf(test, 1); 1994 } 1995 1996 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1997 unsigned int tot_sz, int max_scatter) 1998 { 1999 struct mmc_test_area *t = &test->area; 2000 unsigned int dev_addr, i, cnt, sz, ssz; 2001 struct timespec64 ts1, ts2; 2002 int ret; 2003 2004 sz = t->max_tfr; 2005 2006 /* 2007 * In the case of a maximally scattered transfer, the maximum transfer 2008 * size is further limited by using PAGE_SIZE segments. 2009 */ 2010 if (max_scatter) { 2011 unsigned long max_tfr; 2012 2013 if (t->max_seg_sz >= PAGE_SIZE) 2014 max_tfr = t->max_segs * PAGE_SIZE; 2015 else 2016 max_tfr = t->max_segs * t->max_seg_sz; 2017 if (sz > max_tfr) 2018 sz = max_tfr; 2019 } 2020 2021 ssz = sz >> SECTOR_SHIFT; 2022 dev_addr = mmc_test_capacity(test->card) / 4; 2023 if (tot_sz > dev_addr << SECTOR_SHIFT) 2024 tot_sz = dev_addr << SECTOR_SHIFT; 2025 cnt = tot_sz / sz; 2026 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2027 2028 ktime_get_ts64(&ts1); 2029 for (i = 0; i < cnt; i++) { 2030 ret = mmc_test_area_io(test, sz, dev_addr, write, 2031 max_scatter, 0); 2032 if (ret) 2033 return ret; 2034 dev_addr += ssz; 2035 } 2036 ktime_get_ts64(&ts2); 2037 2038 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2039 2040 return 0; 2041 } 2042 2043 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2044 { 2045 int ret, i; 2046 2047 for (i = 0; i < 10; i++) { 2048 ret = mmc_test_seq_perf(test, write, 10 * SZ_1M, 1); 2049 if (ret) 2050 return ret; 2051 } 2052 for (i = 0; i < 5; i++) { 2053 ret = mmc_test_seq_perf(test, write, 100 * SZ_1M, 1); 2054 if (ret) 2055 return ret; 2056 } 2057 for (i = 0; i < 3; i++) { 2058 ret = mmc_test_seq_perf(test, write, 1000 * SZ_1M, 1); 2059 if (ret) 2060 return ret; 2061 } 2062 2063 return ret; 2064 } 2065 2066 /* 2067 * Large sequential read performance. 
2068 */ 2069 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2070 { 2071 return mmc_test_large_seq_perf(test, 0); 2072 } 2073 2074 /* 2075 * Large sequential write performance. 2076 */ 2077 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2078 { 2079 return mmc_test_large_seq_perf(test, 1); 2080 } 2081 2082 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2083 struct mmc_test_multiple_rw *tdata, 2084 unsigned int reqsize, unsigned int size, 2085 int min_sg_len) 2086 { 2087 unsigned int dev_addr; 2088 struct mmc_test_area *t = &test->area; 2089 int ret = 0; 2090 2091 /* Set up test area */ 2092 if (size > mmc_test_capacity(test->card) / 2 * 512) 2093 size = mmc_test_capacity(test->card) / 2 * 512; 2094 if (reqsize > t->max_tfr) 2095 reqsize = t->max_tfr; 2096 dev_addr = mmc_test_capacity(test->card) / 4; 2097 if ((dev_addr & 0xffff0000)) 2098 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2099 else 2100 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2101 if (!dev_addr) 2102 goto err; 2103 2104 if (reqsize > size) 2105 return 0; 2106 2107 /* prepare test area */ 2108 if (mmc_card_can_erase(test->card) && 2109 tdata->prepare & MMC_TEST_PREP_ERASE) { 2110 ret = mmc_erase(test->card, dev_addr, 2111 size / 512, test->card->erase_arg); 2112 if (ret) 2113 ret = mmc_erase(test->card, dev_addr, 2114 size / 512, MMC_ERASE_ARG); 2115 if (ret) 2116 goto err; 2117 } 2118 2119 /* Run test */ 2120 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2121 tdata->do_write, 0, 1, size / reqsize, 2122 tdata->do_nonblock_req, min_sg_len); 2123 if (ret) 2124 goto err; 2125 2126 return ret; 2127 err: 2128 pr_info("[%s] error\n", __func__); 2129 return ret; 2130 } 2131 2132 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2133 struct mmc_test_multiple_rw *rw) 2134 { 2135 int ret = 0; 2136 int i; 2137 void *pre_req = test->card->host->ops->pre_req; 2138 void *post_req = test->card->host->ops->post_req; 2139 2140 
if (rw->do_nonblock_req && 2141 ((!pre_req && post_req) || (pre_req && !post_req))) { 2142 pr_info("error: only one of pre/post is defined\n"); 2143 return -EINVAL; 2144 } 2145 2146 for (i = 0 ; i < rw->len && ret == 0; i++) { 2147 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2148 if (ret) 2149 break; 2150 } 2151 return ret; 2152 } 2153 2154 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2155 struct mmc_test_multiple_rw *rw) 2156 { 2157 int ret = 0; 2158 int i; 2159 2160 for (i = 0 ; i < rw->len && ret == 0; i++) { 2161 ret = mmc_test_rw_multiple(test, rw, SZ_512K, rw->size, 2162 rw->sg_len[i]); 2163 if (ret) 2164 break; 2165 } 2166 return ret; 2167 } 2168 2169 /* 2170 * Multiple blocking write 4k to 4 MB chunks 2171 */ 2172 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2173 { 2174 struct mmc_test_multiple_rw test_data = { 2175 .bs = bs, 2176 .size = TEST_AREA_MAX_SIZE, 2177 .len = ARRAY_SIZE(bs), 2178 .do_write = true, 2179 .do_nonblock_req = false, 2180 .prepare = MMC_TEST_PREP_ERASE, 2181 }; 2182 2183 return mmc_test_rw_multiple_size(test, &test_data); 2184 }; 2185 2186 /* 2187 * Multiple non-blocking write 4k to 4 MB chunks 2188 */ 2189 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2190 { 2191 struct mmc_test_multiple_rw test_data = { 2192 .bs = bs, 2193 .size = TEST_AREA_MAX_SIZE, 2194 .len = ARRAY_SIZE(bs), 2195 .do_write = true, 2196 .do_nonblock_req = true, 2197 .prepare = MMC_TEST_PREP_ERASE, 2198 }; 2199 2200 return mmc_test_rw_multiple_size(test, &test_data); 2201 } 2202 2203 /* 2204 * Multiple blocking read 4k to 4 MB chunks 2205 */ 2206 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2207 { 2208 struct mmc_test_multiple_rw test_data = { 2209 .bs = bs, 2210 .size = TEST_AREA_MAX_SIZE, 2211 .len = ARRAY_SIZE(bs), 2212 .do_write = false, 2213 .do_nonblock_req = false, 2214 .prepare = MMC_TEST_PREP_NONE, 2215 }; 2216 
2217 return mmc_test_rw_multiple_size(test, &test_data); 2218 } 2219 2220 /* 2221 * Multiple non-blocking read 4k to 4 MB chunks 2222 */ 2223 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2224 { 2225 struct mmc_test_multiple_rw test_data = { 2226 .bs = bs, 2227 .size = TEST_AREA_MAX_SIZE, 2228 .len = ARRAY_SIZE(bs), 2229 .do_write = false, 2230 .do_nonblock_req = true, 2231 .prepare = MMC_TEST_PREP_NONE, 2232 }; 2233 2234 return mmc_test_rw_multiple_size(test, &test_data); 2235 } 2236 2237 /* 2238 * Multiple blocking write 1 to 512 sg elements 2239 */ 2240 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2241 { 2242 struct mmc_test_multiple_rw test_data = { 2243 .sg_len = sg_len, 2244 .size = TEST_AREA_MAX_SIZE, 2245 .len = ARRAY_SIZE(sg_len), 2246 .do_write = true, 2247 .do_nonblock_req = false, 2248 .prepare = MMC_TEST_PREP_ERASE, 2249 }; 2250 2251 return mmc_test_rw_multiple_sg_len(test, &test_data); 2252 }; 2253 2254 /* 2255 * Multiple non-blocking write 1 to 512 sg elements 2256 */ 2257 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2258 { 2259 struct mmc_test_multiple_rw test_data = { 2260 .sg_len = sg_len, 2261 .size = TEST_AREA_MAX_SIZE, 2262 .len = ARRAY_SIZE(sg_len), 2263 .do_write = true, 2264 .do_nonblock_req = true, 2265 .prepare = MMC_TEST_PREP_ERASE, 2266 }; 2267 2268 return mmc_test_rw_multiple_sg_len(test, &test_data); 2269 } 2270 2271 /* 2272 * Multiple blocking read 1 to 512 sg elements 2273 */ 2274 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2275 { 2276 struct mmc_test_multiple_rw test_data = { 2277 .sg_len = sg_len, 2278 .size = TEST_AREA_MAX_SIZE, 2279 .len = ARRAY_SIZE(sg_len), 2280 .do_write = false, 2281 .do_nonblock_req = false, 2282 .prepare = MMC_TEST_PREP_NONE, 2283 }; 2284 2285 return mmc_test_rw_multiple_sg_len(test, &test_data); 2286 } 2287 2288 /* 2289 * Multiple non-blocking read 1 to 512 sg elements 
2290 */ 2291 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2292 { 2293 struct mmc_test_multiple_rw test_data = { 2294 .sg_len = sg_len, 2295 .size = TEST_AREA_MAX_SIZE, 2296 .len = ARRAY_SIZE(sg_len), 2297 .do_write = false, 2298 .do_nonblock_req = true, 2299 .prepare = MMC_TEST_PREP_NONE, 2300 }; 2301 2302 return mmc_test_rw_multiple_sg_len(test, &test_data); 2303 } 2304 2305 /* 2306 * eMMC hardware reset. 2307 */ 2308 static int mmc_test_reset(struct mmc_test_card *test) 2309 { 2310 struct mmc_card *card = test->card; 2311 int err; 2312 2313 err = mmc_hw_reset(card); 2314 if (!err) { 2315 /* 2316 * Reset will re-enable the card's command queue, but tests 2317 * expect it to be disabled. 2318 */ 2319 if (card->ext_csd.cmdq_en) 2320 mmc_cmdq_disable(card); 2321 return RESULT_OK; 2322 } else if (err == -EOPNOTSUPP) { 2323 return RESULT_UNSUP_HOST; 2324 } 2325 2326 return RESULT_FAIL; 2327 } 2328 2329 static int mmc_test_send_status(struct mmc_test_card *test, 2330 struct mmc_command *cmd) 2331 { 2332 memset(cmd, 0, sizeof(*cmd)); 2333 2334 cmd->opcode = MMC_SEND_STATUS; 2335 if (!mmc_host_is_spi(test->card->host)) 2336 cmd->arg = test->card->rca << 16; 2337 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2338 2339 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2340 } 2341 2342 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2343 unsigned int dev_addr, int use_sbc, 2344 int repeat_cmd, int write, int use_areq) 2345 { 2346 struct mmc_test_req *rq = mmc_test_req_alloc(); 2347 struct mmc_host *host = test->card->host; 2348 struct mmc_test_area *t = &test->area; 2349 struct mmc_request *mrq; 2350 unsigned long timeout; 2351 bool expired = false; 2352 int ret = 0, cmd_ret; 2353 u32 status = 0; 2354 int count = 0; 2355 2356 if (!rq) 2357 return -ENOMEM; 2358 2359 mrq = &rq->mrq; 2360 if (use_sbc) 2361 mrq->sbc = &rq->sbc; 2362 mrq->cap_cmd_during_tfr = true; 2363 2364 mmc_test_prepare_mrq(test, mrq, t->sg, 
t->sg_len, dev_addr, t->blocks, 2365 512, write); 2366 2367 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2368 ret = mmc_host_can_cmd23(host) ? 2369 RESULT_UNSUP_CARD : 2370 RESULT_UNSUP_HOST; 2371 goto out_free; 2372 } 2373 2374 /* Start ongoing data request */ 2375 if (use_areq) { 2376 ret = mmc_test_start_areq(test, mrq, NULL); 2377 if (ret) 2378 goto out_free; 2379 } else { 2380 mmc_wait_for_req(host, mrq); 2381 } 2382 2383 timeout = jiffies + msecs_to_jiffies(3000); 2384 do { 2385 count += 1; 2386 2387 /* Send status command while data transfer in progress */ 2388 cmd_ret = mmc_test_send_status(test, &rq->status); 2389 if (cmd_ret) 2390 break; 2391 2392 status = rq->status.resp[0]; 2393 if (status & R1_ERROR) { 2394 cmd_ret = -EIO; 2395 break; 2396 } 2397 2398 if (mmc_is_req_done(host, mrq)) 2399 break; 2400 2401 expired = time_after(jiffies, timeout); 2402 if (expired) { 2403 pr_info("%s: timeout waiting for Tran state status %#x\n", 2404 mmc_hostname(host), status); 2405 cmd_ret = -ETIMEDOUT; 2406 break; 2407 } 2408 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2409 2410 /* Wait for data request to complete */ 2411 if (use_areq) { 2412 ret = mmc_test_start_areq(test, NULL, mrq); 2413 } else { 2414 mmc_wait_for_req_done(test->card->host, mrq); 2415 } 2416 2417 /* 2418 * For cap_cmd_during_tfr request, upper layer must send stop if 2419 * required. 
2420 */ 2421 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2422 if (ret) 2423 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2424 else 2425 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2426 } 2427 2428 if (ret) 2429 goto out_free; 2430 2431 if (cmd_ret) { 2432 pr_info("%s: Send Status failed: status %#x, error %d\n", 2433 mmc_hostname(test->card->host), status, cmd_ret); 2434 } 2435 2436 ret = mmc_test_check_result(test, mrq); 2437 if (ret) 2438 goto out_free; 2439 2440 ret = mmc_test_wait_busy(test); 2441 if (ret) 2442 goto out_free; 2443 2444 if (repeat_cmd && (t->blocks + 1) << SECTOR_SHIFT > t->max_tfr) 2445 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2446 mmc_hostname(test->card->host), count, t->blocks); 2447 2448 if (cmd_ret) 2449 ret = cmd_ret; 2450 out_free: 2451 kfree(rq); 2452 2453 return ret; 2454 } 2455 2456 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2457 unsigned long sz, int use_sbc, int write, 2458 int use_areq) 2459 { 2460 struct mmc_test_area *t = &test->area; 2461 int ret; 2462 2463 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2464 return RESULT_UNSUP_HOST; 2465 2466 ret = mmc_test_area_map(test, sz, 0, 0, use_areq); 2467 if (ret) 2468 return ret; 2469 2470 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2471 use_areq); 2472 if (ret) 2473 return ret; 2474 2475 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2476 use_areq); 2477 } 2478 2479 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2480 int write, int use_areq) 2481 { 2482 struct mmc_test_area *t = &test->area; 2483 unsigned long sz; 2484 int ret; 2485 2486 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2487 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2488 use_areq); 2489 if (ret) 2490 return ret; 2491 } 2492 return 0; 2493 } 2494 2495 /* 2496 * Commands during read - no Set Block Count (CMD23). 
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	/* use_sbc = 0, write = 0, use_areq = 0 */
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	/* use_sbc = 0, write = 1, use_areq = 0 */
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	/* use_sbc = 1, write = 0, use_areq = 0 */
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	/* use_sbc = 1, write = 1, use_areq = 0 */
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	/* use_sbc = 1, write = 0, use_areq = 1 */
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	/* use_sbc = 1, write = 1, use_areq = 1 */
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}

/*
 * Table of all test cases. A test is selected by writing its 1-based index
 * to the "test" debugfs file (0 runs all tests); see mmc_test_run().
 */
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Proper xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Proper xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Proper xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Without CONFIG_HIGHMEM the highmem tests report "unsupported" */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Re-tuning reliability",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_retuning,
		.cleanup = mmc_test_area_cleanup,
	},

};

/* Serializes test runs and all accesses to the global result/file lists */
static DEFINE_MUTEX(mmc_test_lock);

/* Global list of mmc_test_general_result, one entry per executed test */
static LIST_HEAD(mmc_test_result);

/*
 * Run the selected test case (1-based 'testcase' index, or 0 for all) on
 * 'test->card', logging per-stage results and recording them on the global
 * result list.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/*
		 * The result container is optional: if the allocation fails
		 * the test still runs, it just isn't recorded (note the
		 * "if (gr)" guards below).
		 */
		gr = kzalloc_obj(*gr);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

/*
 * Free recorded results for 'card', including their per-transfer
 * measurements. A NULL 'card' frees the results of every card.
 */
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

/* Global list of mmc_test_dbgfs_file entries created under debugfs */
static LIST_HEAD(mmc_test_file_test);

/*
 * seq_file show callback for the "test" debugfs file: print the result of
 * each recorded test for this card, followed by its transfer measurements.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %ptSp %u %u.%02u\n",
				   tr->count, tr->sectors, &tr->ts, tr->rate,
				   tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

/*
 * Write handler for the "test" debugfs file: parse the requested test case
 * number and run it (0 runs all tests).
 */
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct seq_file *sf = file->private_data;
	struct mmc_card *card = sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc_flex(*test, buffer, BUFFER_SIZE);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;

#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
	if (!test->highmem) {
		count = -ENOMEM;
		goto free_test_buffer;
	}
#endif

	mutex_lock(&mmc_test_lock);
	mmc_test_run(test, testcase);
	mutex_unlock(&mmc_test_lock);

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
free_test_buffer:
#endif
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * seq_file show callback for the "testlist" debugfs file: print the 1-based
 * index and name of every available test case.
 */
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtf_testlist);

/*
 * Remove debugfs files created for 'card' and free their bookkeeping.
 * A NULL 'card' removes the files of every card.
 */
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

/*
 * Create one debugfs file for 'card' (if it has a debugfs root) and record
 * it on mmc_test_file_test so it can be removed later.
 */
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	df = kmalloc_obj(*df);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

/* Register the "test" and "testlist" debugfs files for 'card' */
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", 0644,
					     &mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", 0444,
					     &mtf_testlist_fops);
	if (ret)
		goto err;

	/* Fall through to the unlock on success as well */
err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

/*
 * Bus-driver probe: claim MMC/SD cards for testing. Command queueing is
 * disabled for the duration since the tests issue legacy requests.
 */
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	if (mmc_card_ult_capacity(card)) {
		pr_info("%s: mmc-test currently UNSUPPORTED for SDUC\n",
			mmc_hostname(card->host));
		return -EOPNOTSUPP;
	}

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	/* Restore command queueing if probe had disabled it */
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");