1 /* 2 * Copyright 2007-2008 Pierre Ossman 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or (at 7 * your option) any later version. 8 */ 9 10 #include <linux/mmc/core.h> 11 #include <linux/mmc/card.h> 12 #include <linux/mmc/host.h> 13 #include <linux/mmc/mmc.h> 14 #include <linux/slab.h> 15 16 #include <linux/scatterlist.h> 17 #include <linux/swap.h> /* For nr_free_buffer_pages() */ 18 #include <linux/list.h> 19 20 #include <linux/debugfs.h> 21 #include <linux/uaccess.h> 22 #include <linux/seq_file.h> 23 #include <linux/module.h> 24 25 #include "core.h" 26 #include "card.h" 27 #include "host.h" 28 #include "bus.h" 29 30 #define RESULT_OK 0 31 #define RESULT_FAIL 1 32 #define RESULT_UNSUP_HOST 2 33 #define RESULT_UNSUP_CARD 3 34 35 #define BUFFER_ORDER 2 36 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 37 38 #define TEST_ALIGN_END 8 39 40 /* 41 * Limit the test area size to the maximum MMC HC erase group size. Note that 42 * the maximum SD allocation unit size is just 4MiB. 43 */ 44 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 45 46 /** 47 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. 48 * @page: first page in the allocation 49 * @order: order of the number of pages allocated 50 */ 51 struct mmc_test_pages { 52 struct page *page; 53 unsigned int order; 54 }; 55 56 /** 57 * struct mmc_test_mem - allocated memory. 58 * @arr: array of allocations 59 * @cnt: number of allocations 60 */ 61 struct mmc_test_mem { 62 struct mmc_test_pages *arr; 63 unsigned int cnt; 64 }; 65 66 /** 67 * struct mmc_test_area - information for performance tests. 68 * @max_sz: test area size (in bytes) 69 * @dev_addr: address on card at which to do performance tests 70 * @max_tfr: maximum transfer size allowed by driver (in bytes) 71 * @max_segs: maximum segments allowed by driver in scatterlist @sg 72 * @max_seg_sz: maximum segment size allowed by driver 73 * @blocks: number of (512 byte) blocks currently mapped by @sg 74 * @sg_len: length of currently mapped scatterlist @sg 75 * @mem: allocated memory 76 * @sg: scatterlist 77 */ 78 struct mmc_test_area { 79 unsigned long max_sz; 80 unsigned int dev_addr; 81 unsigned int max_tfr; 82 unsigned int max_segs; 83 unsigned int max_seg_sz; 84 unsigned int blocks; 85 unsigned int sg_len; 86 struct mmc_test_mem *mem; 87 struct scatterlist *sg; 88 }; 89 90 /** 91 * struct mmc_test_transfer_result - transfer results for performance tests. 92 * @link: double-linked list 93 * @count: amount of group of sectors to check 94 * @sectors: amount of sectors to check in one group 95 * @ts: time values of transfer 96 * @rate: calculated transfer rate 97 * @iops: I/O operations per second (times 100) 98 */ 99 struct mmc_test_transfer_result { 100 struct list_head link; 101 unsigned int count; 102 unsigned int sectors; 103 struct timespec ts; 104 unsigned int rate; 105 unsigned int iops; 106 }; 107 108 /** 109 * struct mmc_test_general_result - results for tests. 110 * @link: double-linked list 111 * @card: card under test 112 * @testcase: number of test case 113 * @result: result of test run 114 * @tr_lst: transfer measurements if any as mmc_test_transfer_result 115 */ 116 struct mmc_test_general_result { 117 struct list_head link; 118 struct mmc_card *card; 119 int testcase; 120 int result; 121 struct list_head tr_lst; 122 }; 123 124 /** 125 * struct mmc_test_dbgfs_file - debugfs related file. 126 * @link: double-linked list 127 * @card: card under test 128 * @file: file created under debugfs 129 */ 130 struct mmc_test_dbgfs_file { 131 struct list_head link; 132 struct mmc_card *card; 133 struct dentry *file; 134 }; 135 136 /** 137 * struct mmc_test_card - test information. 138 * @card: card under test 139 * @scratch: transfer buffer 140 * @buffer: transfer buffer 141 * @highmem: buffer for highmem tests 142 * @area: information for performance tests 143 * @gr: pointer to results of current testcase 144 */ 145 struct mmc_test_card { 146 struct mmc_card *card; 147 148 u8 scratch[BUFFER_SIZE]; 149 u8 *buffer; 150 #ifdef CONFIG_HIGHMEM 151 struct page *highmem; 152 #endif 153 struct mmc_test_area area; 154 struct mmc_test_general_result *gr; 155 }; 156 157 enum mmc_test_prep_media { 158 MMC_TEST_PREP_NONE = 0, 159 MMC_TEST_PREP_WRITE_FULL = 1 << 0, 160 MMC_TEST_PREP_ERASE = 1 << 1, 161 }; 162 163 struct mmc_test_multiple_rw { 164 unsigned int *sg_len; 165 unsigned int *bs; 166 unsigned int len; 167 unsigned int size; 168 bool do_write; 169 bool do_nonblock_req; 170 enum mmc_test_prep_media prepare; 171 }; 172 173 struct mmc_test_async_req { 174 struct mmc_async_req areq; 175 struct mmc_test_card *test; 176 }; 177 178 /*******************************************************************/ 179 /* General helper functions */ 180 /*******************************************************************/ 181 182 /* 183 * Configure correct block size in card 184 */ 185 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) 186 { 187 return mmc_set_blocklen(test->card, size); 188 } 189 190 static bool mmc_test_card_cmd23(struct mmc_card *card) 191 { 192 return mmc_card_mmc(card) || 193 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT); 194 } 195 196 static void mmc_test_prepare_sbc(struct mmc_test_card *test, 197 struct mmc_request *mrq, unsigned int blocks) 198 { 199 struct mmc_card *card = test->card; 200 201 if (!mrq->sbc || !mmc_host_cmd23(card->host) || 202 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || 203 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) { 204 mrq->sbc = NULL; 205 return; 206 } 207 208 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; 209 mrq->sbc->arg = blocks; 210 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; 211 } 212 213 /* 214 * Fill in the mmc_request structure given a set of transfer parameters. 215 */ 216 static void mmc_test_prepare_mrq(struct mmc_test_card *test, 217 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, 218 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) 219 { 220 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) 221 return; 222 223 if (blocks > 1) { 224 mrq->cmd->opcode = write ? 225 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; 226 } else { 227 mrq->cmd->opcode = write ? 228 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 229 } 230 231 mrq->cmd->arg = dev_addr; 232 if (!mmc_card_blockaddr(test->card)) 233 mrq->cmd->arg <<= 9; 234 235 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 236 237 if (blocks == 1) 238 mrq->stop = NULL; 239 else { 240 mrq->stop->opcode = MMC_STOP_TRANSMISSION; 241 mrq->stop->arg = 0; 242 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 243 } 244 245 mrq->data->blksz = blksz; 246 mrq->data->blocks = blocks; 247 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; 248 mrq->data->sg = sg; 249 mrq->data->sg_len = sg_len; 250 251 mmc_test_prepare_sbc(test, mrq, blocks); 252 253 mmc_set_data_timeout(mrq->data, test->card); 254 } 255 256 static int mmc_test_busy(struct mmc_command *cmd) 257 { 258 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 259 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); 260 } 261 262 /* 263 * Wait for the card to finish the busy state 264 */ 265 static int mmc_test_wait_busy(struct mmc_test_card *test) 266 { 267 int ret, busy; 268 struct mmc_command cmd = {}; 269 270 busy = 0; 271 do { 272 memset(&cmd, 0, sizeof(struct mmc_command)); 273 274 cmd.opcode = MMC_SEND_STATUS; 275 cmd.arg = test->card->rca << 16; 276 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 277 278 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); 279 if (ret) 280 break; 281 282 if (!busy && mmc_test_busy(&cmd)) { 283 busy = 1; 284 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) 285 pr_info("%s: Warning: Host did not wait for busy state to end.\n", 286 mmc_hostname(test->card->host)); 287 } 288 } while (mmc_test_busy(&cmd)); 289 290 return ret; 291 } 292 293 /* 294 * Transfer a single sector of kernel addressable data 295 */ 296 static int mmc_test_buffer_transfer(struct mmc_test_card *test, 297 u8 *buffer, unsigned addr, unsigned blksz, int write) 298 { 299 struct mmc_request mrq = {}; 300 struct mmc_command cmd = {}; 301 struct mmc_command stop = {}; 302 struct mmc_data data = {}; 303 304 struct scatterlist sg; 305 306 mrq.cmd = &cmd; 307 mrq.data = &data; 308 mrq.stop = &stop; 309 310 sg_init_one(&sg, buffer, blksz); 311 312 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); 313 314 mmc_wait_for_req(test->card->host, &mrq); 315 316 if (cmd.error) 317 return cmd.error; 318 if (data.error) 319 return data.error; 320 321 return mmc_test_wait_busy(test); 322 } 323 324 static void mmc_test_free_mem(struct mmc_test_mem *mem) 325 { 326 if (!mem) 327 return; 328 while (mem->cnt--) 329 __free_pages(mem->arr[mem->cnt].page, 330 mem->arr[mem->cnt].order); 331 kfree(mem->arr); 332 kfree(mem); 333 } 334 335 /* 336 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case 337 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do 338 * not exceed a maximum number of segments and try not to make segments much 339 * bigger than maximum segment size. 340 */ 341 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 342 unsigned long max_sz, 343 unsigned int max_segs, 344 unsigned int max_seg_sz) 345 { 346 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 347 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 348 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); 349 unsigned long page_cnt = 0; 350 unsigned long limit = nr_free_buffer_pages() >> 4; 351 struct mmc_test_mem *mem; 352 353 if (max_page_cnt > limit) 354 max_page_cnt = limit; 355 if (min_page_cnt > max_page_cnt) 356 min_page_cnt = max_page_cnt; 357 358 if (max_seg_page_cnt > max_page_cnt) 359 max_seg_page_cnt = max_page_cnt; 360 361 if (max_segs > max_page_cnt) 362 max_segs = max_page_cnt; 363 364 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 365 if (!mem) 366 return NULL; 367 368 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); 369 if (!mem->arr) 370 goto out_free; 371 372 while (max_page_cnt) { 373 struct page *page; 374 unsigned int order; 375 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 376 __GFP_NORETRY; 377 378 order = get_order(max_seg_page_cnt << PAGE_SHIFT); 379 while (1) { 380 page = alloc_pages(flags, order); 381 if (page || !order) 382 break; 383 order -= 1; 384 } 385 if (!page) { 386 if (page_cnt < min_page_cnt) 387 goto out_free; 388 break; 389 } 390 mem->arr[mem->cnt].page = page; 391 mem->arr[mem->cnt].order = order; 392 mem->cnt += 1; 393 if (max_page_cnt <= (1UL << order)) 394 break; 395 max_page_cnt -= 1UL << order; 396 page_cnt += 1UL << order; 397 if (mem->cnt >= max_segs) { 398 if (page_cnt < min_page_cnt) 399 goto out_free; 400 break; 401 } 402 } 403 404 return mem; 405 406 out_free: 407 mmc_test_free_mem(mem); 408 return NULL; 409 } 410 411 /* 412 * Map memory into a scatterlist. Optionally allow the same memory to be 413 * mapped more than once. 414 */ 415 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, 416 struct scatterlist *sglist, int repeat, 417 unsigned int max_segs, unsigned int max_seg_sz, 418 unsigned int *sg_len, int min_sg_len) 419 { 420 struct scatterlist *sg = NULL; 421 unsigned int i; 422 unsigned long sz = size; 423 424 sg_init_table(sglist, max_segs); 425 if (min_sg_len > max_segs) 426 min_sg_len = max_segs; 427 428 *sg_len = 0; 429 do { 430 for (i = 0; i < mem->cnt; i++) { 431 unsigned long len = PAGE_SIZE << mem->arr[i].order; 432 433 if (min_sg_len && (size / min_sg_len < len)) 434 len = ALIGN(size / min_sg_len, 512); 435 if (len > sz) 436 len = sz; 437 if (len > max_seg_sz) 438 len = max_seg_sz; 439 if (sg) 440 sg = sg_next(sg); 441 else 442 sg = sglist; 443 if (!sg) 444 return -EINVAL; 445 sg_set_page(sg, mem->arr[i].page, len, 0); 446 sz -= len; 447 *sg_len += 1; 448 if (!sz) 449 break; 450 } 451 } while (sz && repeat); 452 453 if (sz) 454 return -EINVAL; 455 456 if (sg) 457 sg_mark_end(sg); 458 459 return 0; 460 } 461 462 /* 463 * Map memory into a scatterlist so that no pages are contiguous. Allow the 464 * same memory to be mapped more than once. 465 */ 466 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 467 unsigned long sz, 468 struct scatterlist *sglist, 469 unsigned int max_segs, 470 unsigned int max_seg_sz, 471 unsigned int *sg_len) 472 { 473 struct scatterlist *sg = NULL; 474 unsigned int i = mem->cnt, cnt; 475 unsigned long len; 476 void *base, *addr, *last_addr = NULL; 477 478 sg_init_table(sglist, max_segs); 479 480 *sg_len = 0; 481 while (sz) { 482 base = page_address(mem->arr[--i].page); 483 cnt = 1 << mem->arr[i].order; 484 while (sz && cnt) { 485 addr = base + PAGE_SIZE * --cnt; 486 if (last_addr && last_addr + PAGE_SIZE == addr) 487 continue; 488 last_addr = addr; 489 len = PAGE_SIZE; 490 if (len > max_seg_sz) 491 len = max_seg_sz; 492 if (len > sz) 493 len = sz; 494 if (sg) 495 sg = sg_next(sg); 496 else 497 sg = sglist; 498 if (!sg) 499 return -EINVAL; 500 sg_set_page(sg, virt_to_page(addr), len, 0); 501 sz -= len; 502 *sg_len += 1; 503 } 504 if (i == 0) 505 i = mem->cnt; 506 } 507 508 if (sg) 509 sg_mark_end(sg); 510 511 return 0; 512 } 513 514 /* 515 * Calculate transfer rate in bytes per second. 516 */ 517 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts) 518 { 519 uint64_t ns; 520 521 ns = ts->tv_sec; 522 ns *= 1000000000; 523 ns += ts->tv_nsec; 524 525 bytes *= 1000000000; 526 527 while (ns > UINT_MAX) { 528 bytes >>= 1; 529 ns >>= 1; 530 } 531 532 if (!ns) 533 return 0; 534 535 do_div(bytes, (uint32_t)ns); 536 537 return bytes; 538 } 539 540 /* 541 * Save transfer results for future usage 542 */ 543 static void mmc_test_save_transfer_result(struct mmc_test_card *test, 544 unsigned int count, unsigned int sectors, struct timespec ts, 545 unsigned int rate, unsigned int iops) 546 { 547 struct mmc_test_transfer_result *tr; 548 549 if (!test->gr) 550 return; 551 552 tr = kmalloc(sizeof(*tr), GFP_KERNEL); 553 if (!tr) 554 return; 555 556 tr->count = count; 557 tr->sectors = sectors; 558 tr->ts = ts; 559 tr->rate = rate; 560 tr->iops = iops; 561 562 list_add_tail(&tr->link, &test->gr->tr_lst); 563 } 564 565 /* 566 * Print the transfer rate. 567 */ 568 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, 569 struct timespec *ts1, struct timespec *ts2) 570 { 571 unsigned int rate, iops, sectors = bytes >> 9; 572 struct timespec ts; 573 574 ts = timespec_sub(*ts2, *ts1); 575 576 rate = mmc_test_rate(bytes, &ts); 577 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ 578 579 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " 580 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", 581 mmc_hostname(test->card->host), sectors, sectors >> 1, 582 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, 583 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024, 584 iops / 100, iops % 100); 585 586 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); 587 } 588 589 /* 590 * Print the average transfer rate. 591 */ 592 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, 593 unsigned int count, struct timespec *ts1, 594 struct timespec *ts2) 595 { 596 unsigned int rate, iops, sectors = bytes >> 9; 597 uint64_t tot = bytes * count; 598 struct timespec ts; 599 600 ts = timespec_sub(*ts2, *ts1); 601 602 rate = mmc_test_rate(tot, &ts); 603 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 604 605 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 606 "%lu.%09lu seconds (%u kB/s, %u KiB/s, " 607 "%u.%02u IOPS, sg_len %d)\n", 608 mmc_hostname(test->card->host), count, sectors, count, 609 sectors >> 1, (sectors & 1 ? ".5" : ""), 610 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, 611 rate / 1000, rate / 1024, iops / 100, iops % 100, 612 test->area.sg_len); 613 614 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 615 } 616 617 /* 618 * Return the card size in sectors. 619 */ 620 static unsigned int mmc_test_capacity(struct mmc_card *card) 621 { 622 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) 623 return card->ext_csd.sectors; 624 else 625 return card->csd.capacity << (card->csd.read_blkbits - 9); 626 } 627 628 /*******************************************************************/ 629 /* Test preparation and cleanup */ 630 /*******************************************************************/ 631 632 /* 633 * Fill the first couple of sectors of the card with known data 634 * so that bad reads/writes can be detected 635 */ 636 static int __mmc_test_prepare(struct mmc_test_card *test, int write) 637 { 638 int ret, i; 639 640 ret = mmc_test_set_blksize(test, 512); 641 if (ret) 642 return ret; 643 644 if (write) 645 memset(test->buffer, 0xDF, 512); 646 else { 647 for (i = 0; i < 512; i++) 648 test->buffer[i] = i; 649 } 650 651 for (i = 0; i < BUFFER_SIZE / 512; i++) { 652 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 653 if (ret) 654 return ret; 655 } 656 657 return 0; 658 } 659 660 static int mmc_test_prepare_write(struct mmc_test_card *test) 661 { 662 return __mmc_test_prepare(test, 1); 663 } 664 665 static int mmc_test_prepare_read(struct mmc_test_card *test) 666 { 667 return __mmc_test_prepare(test, 0); 668 } 669 670 static int mmc_test_cleanup(struct mmc_test_card *test) 671 { 672 int ret, i; 673 674 ret = mmc_test_set_blksize(test, 512); 675 if (ret) 676 return ret; 677 678 memset(test->buffer, 0, 512); 679 680 for (i = 0; i < BUFFER_SIZE / 512; i++) { 681 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); 682 if (ret) 683 return ret; 684 } 685 686 return 0; 687 } 688 689 /*******************************************************************/ 690 /* Test execution helpers */ 691 /*******************************************************************/ 692 693 /* 694 * Modifies the mmc_request to perform the "short transfer" tests 695 */ 696 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, 697 struct mmc_request *mrq, int write) 698 { 699 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 700 return; 701 702 if (mrq->data->blocks > 1) { 703 mrq->cmd->opcode = write ? 704 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; 705 mrq->stop = NULL; 706 } else { 707 mrq->cmd->opcode = MMC_SEND_STATUS; 708 mrq->cmd->arg = test->card->rca << 16; 709 } 710 } 711 712 /* 713 * Checks that a normal transfer didn't have any errors 714 */ 715 static int mmc_test_check_result(struct mmc_test_card *test, 716 struct mmc_request *mrq) 717 { 718 int ret; 719 720 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 721 return -EINVAL; 722 723 ret = 0; 724 725 if (mrq->sbc && mrq->sbc->error) 726 ret = mrq->sbc->error; 727 if (!ret && mrq->cmd->error) 728 ret = mrq->cmd->error; 729 if (!ret && mrq->data->error) 730 ret = mrq->data->error; 731 if (!ret && mrq->stop && mrq->stop->error) 732 ret = mrq->stop->error; 733 if (!ret && mrq->data->bytes_xfered != 734 mrq->data->blocks * mrq->data->blksz) 735 ret = RESULT_FAIL; 736 737 if (ret == -EINVAL) 738 ret = RESULT_UNSUP_HOST; 739 740 return ret; 741 } 742 743 static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card, 744 struct mmc_async_req *areq) 745 { 746 struct mmc_test_async_req *test_async = 747 container_of(areq, struct mmc_test_async_req, areq); 748 int ret; 749 750 mmc_test_wait_busy(test_async->test); 751 752 /* 753 * FIXME: this would earlier just casts a regular error code, 754 * either of the kernel type -ERRORCODE or the local test framework 755 * RESULT_* errorcode, into an enum mmc_blk_status and return as 756 * result check. Instead, convert it to some reasonable type by just 757 * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR. 758 * If possible, a reasonable error code should be returned. 759 */ 760 ret = mmc_test_check_result(test_async->test, areq->mrq); 761 if (ret) 762 return MMC_BLK_CMD_ERR; 763 764 return MMC_BLK_SUCCESS; 765 } 766 767 /* 768 * Checks that a "short transfer" behaved as expected 769 */ 770 static int mmc_test_check_broken_result(struct mmc_test_card *test, 771 struct mmc_request *mrq) 772 { 773 int ret; 774 775 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) 776 return -EINVAL; 777 778 ret = 0; 779 780 if (!ret && mrq->cmd->error) 781 ret = mrq->cmd->error; 782 if (!ret && mrq->data->error == 0) 783 ret = RESULT_FAIL; 784 if (!ret && mrq->data->error != -ETIMEDOUT) 785 ret = mrq->data->error; 786 if (!ret && mrq->stop && mrq->stop->error) 787 ret = mrq->stop->error; 788 if (mrq->data->blocks > 1) { 789 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) 790 ret = RESULT_FAIL; 791 } else { 792 if (!ret && mrq->data->bytes_xfered > 0) 793 ret = RESULT_FAIL; 794 } 795 796 if (ret == -EINVAL) 797 ret = RESULT_UNSUP_HOST; 798 799 return ret; 800 } 801 802 /* 803 * Tests nonblock transfer with certain parameters 804 */ 805 static void mmc_test_nonblock_reset(struct mmc_request *mrq, 806 struct mmc_command *cmd, 807 struct mmc_command *stop, 808 struct mmc_data *data) 809 { 810 memset(mrq, 0, sizeof(struct mmc_request)); 811 memset(cmd, 0, sizeof(struct mmc_command)); 812 memset(data, 0, sizeof(struct mmc_data)); 813 memset(stop, 0, sizeof(struct mmc_command)); 814 815 mrq->cmd = cmd; 816 mrq->data = data; 817 mrq->stop = stop; 818 } 819 static int mmc_test_nonblock_transfer(struct mmc_test_card *test, 820 struct scatterlist *sg, unsigned sg_len, 821 unsigned dev_addr, unsigned blocks, 822 unsigned blksz, int write, int count) 823 { 824 struct mmc_request mrq1; 825 struct mmc_command cmd1; 826 struct mmc_command stop1; 827 struct mmc_data data1; 828 829 struct mmc_request mrq2; 830 struct mmc_command cmd2; 831 struct mmc_command stop2; 832 struct mmc_data data2; 833 834 struct mmc_test_async_req test_areq[2]; 835 struct mmc_async_req *done_areq; 836 struct mmc_async_req *cur_areq = &test_areq[0].areq; 837 struct mmc_async_req *other_areq = &test_areq[1].areq; 838 enum mmc_blk_status status; 839 int i; 840 int ret = RESULT_OK; 841 842 test_areq[0].test = test; 843 test_areq[1].test = test; 844 845 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); 846 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); 847 848 cur_areq->mrq = &mrq1; 849 cur_areq->err_check = mmc_test_check_result_async; 850 other_areq->mrq = &mrq2; 851 other_areq->err_check = mmc_test_check_result_async; 852 853 for (i = 0; i < count; i++) { 854 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, 855 blocks, blksz, write); 856 done_areq = mmc_start_areq(test->card->host, cur_areq, &status); 857 858 if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) { 859 ret = RESULT_FAIL; 860 goto err; 861 } 862 863 if (done_areq) { 864 if (done_areq->mrq == &mrq2) 865 mmc_test_nonblock_reset(&mrq2, &cmd2, 866 &stop2, &data2); 867 else 868 mmc_test_nonblock_reset(&mrq1, &cmd1, 869 &stop1, &data1); 870 } 871 swap(cur_areq, other_areq); 872 dev_addr += blocks; 873 } 874 875 done_areq = mmc_start_areq(test->card->host, NULL, &status); 876 if (status != MMC_BLK_SUCCESS) 877 ret = RESULT_FAIL; 878 879 return ret; 880 err: 881 return ret; 882 } 883 884 /* 885 * Tests a basic transfer with certain parameters 886 */ 887 static int mmc_test_simple_transfer(struct mmc_test_card *test, 888 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 889 unsigned blocks, unsigned blksz, int write) 890 { 891 struct mmc_request mrq = {}; 892 struct mmc_command cmd = {}; 893 struct mmc_command stop = {}; 894 struct mmc_data data = {}; 895 896 mrq.cmd = &cmd; 897 mrq.data = &data; 898 mrq.stop = &stop; 899 900 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, 901 blocks, blksz, write); 902 903 mmc_wait_for_req(test->card->host, &mrq); 904 905 mmc_test_wait_busy(test); 906 907 return mmc_test_check_result(test, &mrq); 908 } 909 910 /* 911 * Tests a transfer where the card will fail completely or partly 912 */ 913 static int mmc_test_broken_transfer(struct mmc_test_card *test, 914 unsigned blocks, unsigned blksz, int write) 915 { 916 struct mmc_request mrq = {}; 917 struct mmc_command cmd = {}; 918 struct mmc_command stop = {}; 919 struct mmc_data data = {}; 920 921 struct scatterlist sg; 922 923 mrq.cmd = &cmd; 924 mrq.data = &data; 925 mrq.stop = &stop; 926 927 sg_init_one(&sg, test->buffer, blocks * blksz); 928 929 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); 930 mmc_test_prepare_broken_mrq(test, &mrq, write); 931 932 mmc_wait_for_req(test->card->host, &mrq); 933 934 mmc_test_wait_busy(test); 935 936 return mmc_test_check_broken_result(test, &mrq); 937 } 938 939 /* 940 * Does a complete transfer test where data is also validated 941 * 942 * Note: mmc_test_prepare() must have been done before this call 943 */ 944 static int mmc_test_transfer(struct mmc_test_card *test, 945 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 946 unsigned blocks, unsigned blksz, int write) 947 { 948 int ret, i; 949 unsigned long flags; 950 951 if (write) { 952 for (i = 0; i < blocks * blksz; i++) 953 test->scratch[i] = i; 954 } else { 955 memset(test->scratch, 0, BUFFER_SIZE); 956 } 957 local_irq_save(flags); 958 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 959 local_irq_restore(flags); 960 961 ret = mmc_test_set_blksize(test, blksz); 962 if (ret) 963 return ret; 964 965 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, 966 blocks, blksz, write); 967 if (ret) 968 return ret; 969 970 if (write) { 971 int sectors; 972 973 ret = mmc_test_set_blksize(test, 512); 974 if (ret) 975 return ret; 976 977 sectors = (blocks * blksz + 511) / 512; 978 if ((sectors * 512) == (blocks * blksz)) 979 sectors++; 980 981 if ((sectors * 512) > BUFFER_SIZE) 982 return -EINVAL; 983 984 memset(test->buffer, 0, sectors * 512); 985 986 for (i = 0; i < sectors; i++) { 987 ret = mmc_test_buffer_transfer(test, 988 test->buffer + i * 512, 989 dev_addr + i, 512, 0); 990 if (ret) 991 return ret; 992 } 993 994 for (i = 0; i < blocks * blksz; i++) { 995 if (test->buffer[i] != (u8)i) 996 return RESULT_FAIL; 997 } 998 999 for (; i < sectors * 512; i++) { 1000 if (test->buffer[i] != 0xDF) 1001 return RESULT_FAIL; 1002 } 1003 } else { 1004 local_irq_save(flags); 1005 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 1006 local_irq_restore(flags); 1007 for (i = 0; i < blocks * blksz; i++) { 1008 if (test->scratch[i] != (u8)i) 1009 return RESULT_FAIL; 1010 } 1011 } 1012 1013 return 0; 1014 } 1015 1016 /*******************************************************************/ 1017 /* Tests */ 1018 /*******************************************************************/ 1019 1020 struct mmc_test_case { 1021 const char *name; 1022 1023 int (*prepare)(struct mmc_test_card *); 1024 int (*run)(struct mmc_test_card *); 1025 int (*cleanup)(struct mmc_test_card *); 1026 }; 1027 1028 static int mmc_test_basic_write(struct mmc_test_card *test) 1029 { 1030 int ret; 1031 struct scatterlist sg; 1032 1033 ret = mmc_test_set_blksize(test, 512); 1034 if (ret) 1035 return ret; 1036 1037 sg_init_one(&sg, test->buffer, 512); 1038 1039 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); 1040 } 1041 1042 static int mmc_test_basic_read(struct mmc_test_card *test) 1043 { 1044 int ret; 1045 struct scatterlist sg; 1046 1047 ret = mmc_test_set_blksize(test, 512); 1048 if (ret) 1049 return ret; 1050 1051 sg_init_one(&sg, test->buffer, 512); 1052 1053 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); 1054 } 1055 1056 static int mmc_test_verify_write(struct mmc_test_card *test) 1057 { 1058 struct scatterlist sg; 1059 1060 sg_init_one(&sg, test->buffer, 512); 1061 1062 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1063 } 1064 1065 static int mmc_test_verify_read(struct mmc_test_card *test) 1066 { 1067 struct scatterlist sg; 1068 1069 sg_init_one(&sg, test->buffer, 512); 1070 1071 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1072 } 1073 1074 static int mmc_test_multi_write(struct mmc_test_card *test) 1075 { 1076 unsigned int size; 1077 struct scatterlist sg; 1078 1079 if (test->card->host->max_blk_count == 1) 1080 return RESULT_UNSUP_HOST; 1081 1082 size = PAGE_SIZE * 2; 1083 size = min(size, test->card->host->max_req_size); 1084 size = min(size, test->card->host->max_seg_size); 1085 size = min(size, test->card->host->max_blk_count * 512); 1086 1087 if (size < 1024) 1088 return RESULT_UNSUP_HOST; 1089 1090 sg_init_one(&sg, test->buffer, size); 1091 1092 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1093 } 1094 1095 static int mmc_test_multi_read(struct mmc_test_card *test) 1096 { 1097 unsigned int size; 1098 struct scatterlist sg; 1099 1100 if (test->card->host->max_blk_count == 1) 1101 return RESULT_UNSUP_HOST; 1102 1103 size = PAGE_SIZE * 2; 1104 size = min(size, test->card->host->max_req_size); 1105 size = min(size, test->card->host->max_seg_size); 1106 size = min(size, test->card->host->max_blk_count * 512); 1107 1108 if (size < 1024) 1109 return RESULT_UNSUP_HOST; 1110 1111 sg_init_one(&sg, test->buffer, size); 1112 1113 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1114 } 1115 1116 static int mmc_test_pow2_write(struct mmc_test_card *test) 1117 { 1118 int ret, i; 1119 struct scatterlist sg; 1120 1121 if (!test->card->csd.write_partial) 1122 return RESULT_UNSUP_CARD; 1123 1124 for (i = 1; i < 512; i <<= 1) { 1125 sg_init_one(&sg, test->buffer, i); 1126 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1127 if (ret) 1128 return ret; 1129 } 1130 1131 return 0; 1132 } 1133 1134 static int mmc_test_pow2_read(struct mmc_test_card *test) 1135 { 1136 int ret, i; 1137 struct scatterlist sg; 1138 1139 if (!test->card->csd.read_partial) 1140 return RESULT_UNSUP_CARD; 1141 1142 for (i = 1; i < 512; i <<= 1) { 1143 sg_init_one(&sg, test->buffer, i); 1144 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1145 if (ret) 1146 return ret; 1147 } 1148 1149 return 0; 1150 } 1151 1152 static int mmc_test_weird_write(struct mmc_test_card *test) 1153 { 1154 int ret, i; 1155 struct scatterlist sg; 1156 1157 if (!test->card->csd.write_partial) 1158 return RESULT_UNSUP_CARD; 1159 1160 for (i = 3; i < 512; i += 7) { 1161 sg_init_one(&sg, test->buffer, i); 1162 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); 1163 if (ret) 1164 return ret; 1165 } 1166 1167 return 0; 1168 } 1169 1170 static int mmc_test_weird_read(struct mmc_test_card *test) 1171 { 1172 int ret, i; 1173 struct scatterlist sg; 1174 1175 if (!test->card->csd.read_partial) 1176 return RESULT_UNSUP_CARD; 1177 1178 for (i = 3; i < 512; i += 7) { 1179 sg_init_one(&sg, test->buffer, i); 1180 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); 1181 if (ret) 1182 return ret; 1183 } 1184 1185 return 0; 1186 } 1187 1188 static int mmc_test_align_write(struct mmc_test_card *test) 1189 { 1190 int ret, i; 1191 struct scatterlist sg; 1192 1193 for (i = 1; i < TEST_ALIGN_END; i++) { 1194 sg_init_one(&sg, test->buffer + i, 512); 1195 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1196 if (ret) 1197 return ret; 1198 } 1199 1200 return 0; 1201 } 1202 1203 static int mmc_test_align_read(struct mmc_test_card *test) 1204 { 1205 int ret, i; 1206 struct scatterlist sg; 1207 1208 for (i = 1; i < TEST_ALIGN_END; i++) { 1209 sg_init_one(&sg, test->buffer + i, 512); 1210 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1211 if (ret) 1212 return ret; 1213 } 1214 1215 return 0; 1216 } 1217 1218 static int mmc_test_align_multi_write(struct mmc_test_card *test) 1219 { 1220 int ret, i; 1221 unsigned int size; 1222 struct scatterlist sg; 1223 1224 if (test->card->host->max_blk_count == 1) 1225 return RESULT_UNSUP_HOST; 1226 1227 size = PAGE_SIZE * 2; 1228 size = min(size, test->card->host->max_req_size); 1229 size = min(size, test->card->host->max_seg_size); 1230 size = min(size, test->card->host->max_blk_count * 512); 1231 1232 if (size < 1024) 1233 return RESULT_UNSUP_HOST; 1234 1235 for (i = 1; i < TEST_ALIGN_END; i++) { 1236 sg_init_one(&sg, test->buffer + i, size); 1237 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1238 if (ret) 1239 return ret; 1240 } 1241 1242 return 0; 1243 } 1244 1245 static int mmc_test_align_multi_read(struct mmc_test_card *test) 1246 { 1247 int ret, i; 1248 unsigned int size; 1249 struct scatterlist sg; 1250 1251 if (test->card->host->max_blk_count == 1) 1252 return RESULT_UNSUP_HOST; 1253 1254 size = PAGE_SIZE * 2; 1255 size = min(size, test->card->host->max_req_size); 1256 size = min(size, test->card->host->max_seg_size); 1257 size = min(size, test->card->host->max_blk_count * 512); 1258 1259 if (size < 1024) 1260 return RESULT_UNSUP_HOST; 1261 1262 for (i = 1; i < TEST_ALIGN_END; i++) { 1263 sg_init_one(&sg, test->buffer + i, size); 1264 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1265 if (ret) 1266 return ret; 1267 } 1268 1269 return 0; 1270 } 1271 1272 static int mmc_test_xfersize_write(struct mmc_test_card *test) 1273 { 1274 int ret; 1275 1276 ret = mmc_test_set_blksize(test, 512); 1277 if (ret) 1278 return ret; 1279 1280 return mmc_test_broken_transfer(test, 1, 512, 1); 1281 } 1282 1283 static int mmc_test_xfersize_read(struct mmc_test_card *test) 1284 { 1285 int ret; 1286 1287 ret = mmc_test_set_blksize(test, 512); 1288 if (ret) 1289 return ret; 1290 1291 return mmc_test_broken_transfer(test, 1, 512, 0); 1292 } 1293 1294 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) 1295 { 1296 int ret; 1297 1298 if (test->card->host->max_blk_count == 1) 1299 return RESULT_UNSUP_HOST; 1300 1301 ret = mmc_test_set_blksize(test, 512); 1302 if (ret) 1303 return ret; 1304 1305 return mmc_test_broken_transfer(test, 2, 512, 1); 1306 } 1307 1308 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) 1309 { 1310 int ret; 1311 1312 if (test->card->host->max_blk_count == 1) 1313 return RESULT_UNSUP_HOST; 1314 1315 ret = mmc_test_set_blksize(test, 512); 1316 if (ret) 1317 return ret; 1318 1319 return mmc_test_broken_transfer(test, 2, 512, 0); 1320 } 1321 1322 #ifdef CONFIG_HIGHMEM 1323 1324 static int mmc_test_write_high(struct mmc_test_card *test) 1325 { 1326 struct scatterlist sg; 1327 1328 sg_init_table(&sg, 1); 1329 sg_set_page(&sg, test->highmem, 512, 0); 1330 1331 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); 1332 } 1333 1334 static int mmc_test_read_high(struct mmc_test_card *test) 1335 { 1336 struct scatterlist sg; 1337 1338 sg_init_table(&sg, 1); 1339 sg_set_page(&sg, test->highmem, 512, 0); 1340 1341 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); 1342 } 1343 1344 static int mmc_test_multi_write_high(struct mmc_test_card *test) 1345 { 1346 unsigned int size; 1347 struct scatterlist sg; 1348 1349 if (test->card->host->max_blk_count == 1) 1350 return RESULT_UNSUP_HOST; 1351 1352 size = PAGE_SIZE * 2; 1353 size = min(size, test->card->host->max_req_size); 1354 size = min(size, test->card->host->max_seg_size); 1355 size = min(size, test->card->host->max_blk_count * 512); 1356 1357 if (size < 1024) 1358 return RESULT_UNSUP_HOST; 1359 1360 sg_init_table(&sg, 1); 1361 sg_set_page(&sg, test->highmem, size, 0); 1362 1363 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); 1364 } 1365 1366 static int mmc_test_multi_read_high(struct mmc_test_card *test) 1367 { 1368 unsigned int size; 1369 struct scatterlist sg; 1370 1371 if (test->card->host->max_blk_count == 1) 1372 return RESULT_UNSUP_HOST; 1373 1374 size = PAGE_SIZE * 2; 1375 size = min(size, test->card->host->max_req_size); 1376 size = min(size, test->card->host->max_seg_size); 1377 size = min(size, test->card->host->max_blk_count * 512); 1378 1379 if (size < 1024) 1380 return RESULT_UNSUP_HOST; 1381 1382 sg_init_table(&sg, 1); 1383 sg_set_page(&sg, test->highmem, size, 0); 1384 1385 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); 1386 } 1387 1388 #else 1389 1390 static int mmc_test_no_highmem(struct mmc_test_card *test) 1391 { 1392 pr_info("%s: Highmem not configured - test skipped\n", 1393 mmc_hostname(test->card->host)); 1394 return 0; 1395 } 1396 1397 #endif /* CONFIG_HIGHMEM */ 1398 1399 /* 1400 * Map sz bytes so that it can be transferred. 1401 */ 1402 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1403 int max_scatter, int min_sg_len) 1404 { 1405 struct mmc_test_area *t = &test->area; 1406 int err; 1407 1408 t->blocks = sz >> 9; 1409 1410 if (max_scatter) { 1411 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, 1412 t->max_segs, t->max_seg_sz, 1413 &t->sg_len); 1414 } else { 1415 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, 1416 t->max_seg_sz, &t->sg_len, min_sg_len); 1417 } 1418 if (err) 1419 pr_info("%s: Failed to map sg list\n", 1420 mmc_hostname(test->card->host)); 1421 return err; 1422 } 1423 1424 /* 1425 * Transfer bytes mapped by mmc_test_area_map(). 1426 */ 1427 static int mmc_test_area_transfer(struct mmc_test_card *test, 1428 unsigned int dev_addr, int write) 1429 { 1430 struct mmc_test_area *t = &test->area; 1431 1432 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, 1433 t->blocks, 512, write); 1434 } 1435 1436 /* 1437 * Map and transfer bytes for multiple transfers. 1438 */ 1439 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, 1440 unsigned int dev_addr, int write, 1441 int max_scatter, int timed, int count, 1442 bool nonblock, int min_sg_len) 1443 { 1444 struct timespec ts1, ts2; 1445 int ret = 0; 1446 int i; 1447 struct mmc_test_area *t = &test->area; 1448 1449 /* 1450 * In the case of a maximally scattered transfer, the maximum transfer 1451 * size is further limited by using PAGE_SIZE segments. 1452 */ 1453 if (max_scatter) { 1454 struct mmc_test_area *t = &test->area; 1455 unsigned long max_tfr; 1456 1457 if (t->max_seg_sz >= PAGE_SIZE) 1458 max_tfr = t->max_segs * PAGE_SIZE; 1459 else 1460 max_tfr = t->max_segs * t->max_seg_sz; 1461 if (sz > max_tfr) 1462 sz = max_tfr; 1463 } 1464 1465 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len); 1466 if (ret) 1467 return ret; 1468 1469 if (timed) 1470 getnstimeofday(&ts1); 1471 if (nonblock) 1472 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, 1473 dev_addr, t->blocks, 512, write, count); 1474 else 1475 for (i = 0; i < count && ret == 0; i++) { 1476 ret = mmc_test_area_transfer(test, dev_addr, write); 1477 dev_addr += sz >> 9; 1478 } 1479 1480 if (ret) 1481 return ret; 1482 1483 if (timed) 1484 getnstimeofday(&ts2); 1485 1486 if (timed) 1487 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); 1488 1489 return 0; 1490 } 1491 1492 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1493 unsigned int dev_addr, int write, int max_scatter, 1494 int timed) 1495 { 1496 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, 1497 timed, 1, false, 0); 1498 } 1499 1500 /* 1501 * Write the test area entirely. 1502 */ 1503 static int mmc_test_area_fill(struct mmc_test_card *test) 1504 { 1505 struct mmc_test_area *t = &test->area; 1506 1507 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); 1508 } 1509 1510 /* 1511 * Erase the test area entirely. 1512 */ 1513 static int mmc_test_area_erase(struct mmc_test_card *test) 1514 { 1515 struct mmc_test_area *t = &test->area; 1516 1517 if (!mmc_can_erase(test->card)) 1518 return 0; 1519 1520 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, 1521 MMC_ERASE_ARG); 1522 } 1523 1524 /* 1525 * Cleanup struct mmc_test_area. 1526 */ 1527 static int mmc_test_area_cleanup(struct mmc_test_card *test) 1528 { 1529 struct mmc_test_area *t = &test->area; 1530 1531 kfree(t->sg); 1532 mmc_test_free_mem(t->mem); 1533 1534 return 0; 1535 } 1536 1537 /* 1538 * Initialize an area for testing large transfers. The test area is set to the 1539 * middle of the card because cards may have different characteristics at the 1540 * front (for FAT file system optimization). Optionally, the area is erased 1541 * (if the card supports it) which may improve write performance. Optionally, 1542 * the area is filled with data for subsequent read tests. 1543 */ 1544 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1545 { 1546 struct mmc_test_area *t = &test->area; 1547 unsigned long min_sz = 64 * 1024, sz; 1548 int ret; 1549 1550 ret = mmc_test_set_blksize(test, 512); 1551 if (ret) 1552 return ret; 1553 1554 /* Make the test area size about 4MiB */ 1555 sz = (unsigned long)test->card->pref_erase << 9; 1556 t->max_sz = sz; 1557 while (t->max_sz < 4 * 1024 * 1024) 1558 t->max_sz += sz; 1559 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) 1560 t->max_sz -= sz; 1561 1562 t->max_segs = test->card->host->max_segs; 1563 t->max_seg_sz = test->card->host->max_seg_size; 1564 t->max_seg_sz -= t->max_seg_sz % 512; 1565 1566 t->max_tfr = t->max_sz; 1567 if (t->max_tfr >> 9 > test->card->host->max_blk_count) 1568 t->max_tfr = test->card->host->max_blk_count << 9; 1569 if (t->max_tfr > test->card->host->max_req_size) 1570 t->max_tfr = test->card->host->max_req_size; 1571 if (t->max_tfr / t->max_seg_sz > t->max_segs) 1572 t->max_tfr = t->max_segs * t->max_seg_sz; 1573 1574 /* 1575 * Try to allocate enough memory for a max. sized transfer. Less is OK 1576 * because the same memory can be mapped into the scatterlist more than 1577 * once. Also, take into account the limits imposed on scatterlist 1578 * segments by the host driver. 1579 */ 1580 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, 1581 t->max_seg_sz); 1582 if (!t->mem) 1583 return -ENOMEM; 1584 1585 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); 1586 if (!t->sg) { 1587 ret = -ENOMEM; 1588 goto out_free; 1589 } 1590 1591 t->dev_addr = mmc_test_capacity(test->card) / 2; 1592 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); 1593 1594 if (erase) { 1595 ret = mmc_test_area_erase(test); 1596 if (ret) 1597 goto out_free; 1598 } 1599 1600 if (fill) { 1601 ret = mmc_test_area_fill(test); 1602 if (ret) 1603 goto out_free; 1604 } 1605 1606 return 0; 1607 1608 out_free: 1609 mmc_test_area_cleanup(test); 1610 return ret; 1611 } 1612 1613 /* 1614 * Prepare for large transfers. Do not erase the test area. 1615 */ 1616 static int mmc_test_area_prepare(struct mmc_test_card *test) 1617 { 1618 return mmc_test_area_init(test, 0, 0); 1619 } 1620 1621 /* 1622 * Prepare for large transfers. Do erase the test area. 1623 */ 1624 static int mmc_test_area_prepare_erase(struct mmc_test_card *test) 1625 { 1626 return mmc_test_area_init(test, 1, 0); 1627 } 1628 1629 /* 1630 * Prepare for large transfers. Erase and fill the test area. 1631 */ 1632 static int mmc_test_area_prepare_fill(struct mmc_test_card *test) 1633 { 1634 return mmc_test_area_init(test, 1, 1); 1635 } 1636 1637 /* 1638 * Test best-case performance. Best-case performance is expected from 1639 * a single large transfer. 1640 * 1641 * An additional option (max_scatter) allows the measurement of the same 1642 * transfer but with no contiguous pages in the scatter list. This tests 1643 * the efficiency of DMA to handle scattered pages. 1644 */ 1645 static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1646 int max_scatter) 1647 { 1648 struct mmc_test_area *t = &test->area; 1649 1650 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, 1651 max_scatter, 1); 1652 } 1653 1654 /* 1655 * Best-case read performance. 1656 */ 1657 static int mmc_test_best_read_performance(struct mmc_test_card *test) 1658 { 1659 return mmc_test_best_performance(test, 0, 0); 1660 } 1661 1662 /* 1663 * Best-case write performance. 1664 */ 1665 static int mmc_test_best_write_performance(struct mmc_test_card *test) 1666 { 1667 return mmc_test_best_performance(test, 1, 0); 1668 } 1669 1670 /* 1671 * Best-case read performance into scattered pages. 1672 */ 1673 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test) 1674 { 1675 return mmc_test_best_performance(test, 0, 1); 1676 } 1677 1678 /* 1679 * Best-case write performance from scattered pages. 1680 */ 1681 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) 1682 { 1683 return mmc_test_best_performance(test, 1, 1); 1684 } 1685 1686 /* 1687 * Single read performance by transfer size. 1688 */ 1689 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1690 { 1691 struct mmc_test_area *t = &test->area; 1692 unsigned long sz; 1693 unsigned int dev_addr; 1694 int ret; 1695 1696 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1697 dev_addr = t->dev_addr + (sz >> 9); 1698 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1699 if (ret) 1700 return ret; 1701 } 1702 sz = t->max_tfr; 1703 dev_addr = t->dev_addr; 1704 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1705 } 1706 1707 /* 1708 * Single write performance by transfer size. 1709 */ 1710 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1711 { 1712 struct mmc_test_area *t = &test->area; 1713 unsigned long sz; 1714 unsigned int dev_addr; 1715 int ret; 1716 1717 ret = mmc_test_area_erase(test); 1718 if (ret) 1719 return ret; 1720 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1721 dev_addr = t->dev_addr + (sz >> 9); 1722 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1723 if (ret) 1724 return ret; 1725 } 1726 ret = mmc_test_area_erase(test); 1727 if (ret) 1728 return ret; 1729 sz = t->max_tfr; 1730 dev_addr = t->dev_addr; 1731 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1732 } 1733 1734 /* 1735 * Single trim performance by transfer size. 1736 */ 1737 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1738 { 1739 struct mmc_test_area *t = &test->area; 1740 unsigned long sz; 1741 unsigned int dev_addr; 1742 struct timespec ts1, ts2; 1743 int ret; 1744 1745 if (!mmc_can_trim(test->card)) 1746 return RESULT_UNSUP_CARD; 1747 1748 if (!mmc_can_erase(test->card)) 1749 return RESULT_UNSUP_HOST; 1750 1751 for (sz = 512; sz < t->max_sz; sz <<= 1) { 1752 dev_addr = t->dev_addr + (sz >> 9); 1753 getnstimeofday(&ts1); 1754 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1755 if (ret) 1756 return ret; 1757 getnstimeofday(&ts2); 1758 mmc_test_print_rate(test, sz, &ts1, &ts2); 1759 } 1760 dev_addr = t->dev_addr; 1761 getnstimeofday(&ts1); 1762 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1763 if (ret) 1764 return ret; 1765 getnstimeofday(&ts2); 1766 mmc_test_print_rate(test, sz, &ts1, &ts2); 1767 return 0; 1768 } 1769 1770 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1771 { 1772 struct mmc_test_area *t = &test->area; 1773 unsigned int dev_addr, i, cnt; 1774 struct timespec ts1, ts2; 1775 int ret; 1776 1777 cnt = t->max_sz / sz; 1778 dev_addr = t->dev_addr; 1779 getnstimeofday(&ts1); 1780 for (i = 0; i < cnt; i++) { 1781 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1782 if (ret) 1783 return ret; 1784 dev_addr += (sz >> 9); 1785 } 1786 getnstimeofday(&ts2); 1787 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1788 return 0; 1789 } 1790 1791 /* 1792 * Consecutive read performance by transfer size. 1793 */ 1794 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1795 { 1796 struct mmc_test_area *t = &test->area; 1797 unsigned long sz; 1798 int ret; 1799 1800 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1801 ret = mmc_test_seq_read_perf(test, sz); 1802 if (ret) 1803 return ret; 1804 } 1805 sz = t->max_tfr; 1806 return mmc_test_seq_read_perf(test, sz); 1807 } 1808 1809 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1810 { 1811 struct mmc_test_area *t = &test->area; 1812 unsigned int dev_addr, i, cnt; 1813 struct timespec ts1, ts2; 1814 int ret; 1815 1816 ret = mmc_test_area_erase(test); 1817 if (ret) 1818 return ret; 1819 cnt = t->max_sz / sz; 1820 dev_addr = t->dev_addr; 1821 getnstimeofday(&ts1); 1822 for (i = 0; i < cnt; i++) { 1823 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1824 if (ret) 1825 return ret; 1826 dev_addr += (sz >> 9); 1827 } 1828 getnstimeofday(&ts2); 1829 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1830 return 0; 1831 } 1832 1833 /* 1834 * Consecutive write performance by transfer size. 1835 */ 1836 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1837 { 1838 struct mmc_test_area *t = &test->area; 1839 unsigned long sz; 1840 int ret; 1841 1842 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1843 ret = mmc_test_seq_write_perf(test, sz); 1844 if (ret) 1845 return ret; 1846 } 1847 sz = t->max_tfr; 1848 return mmc_test_seq_write_perf(test, sz); 1849 } 1850 1851 /* 1852 * Consecutive trim performance by transfer size. 1853 */ 1854 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1855 { 1856 struct mmc_test_area *t = &test->area; 1857 unsigned long sz; 1858 unsigned int dev_addr, i, cnt; 1859 struct timespec ts1, ts2; 1860 int ret; 1861 1862 if (!mmc_can_trim(test->card)) 1863 return RESULT_UNSUP_CARD; 1864 1865 if (!mmc_can_erase(test->card)) 1866 return RESULT_UNSUP_HOST; 1867 1868 for (sz = 512; sz <= t->max_sz; sz <<= 1) { 1869 ret = mmc_test_area_erase(test); 1870 if (ret) 1871 return ret; 1872 ret = mmc_test_area_fill(test); 1873 if (ret) 1874 return ret; 1875 cnt = t->max_sz / sz; 1876 dev_addr = t->dev_addr; 1877 getnstimeofday(&ts1); 1878 for (i = 0; i < cnt; i++) { 1879 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1880 MMC_TRIM_ARG); 1881 if (ret) 1882 return ret; 1883 dev_addr += (sz >> 9); 1884 } 1885 getnstimeofday(&ts2); 1886 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1887 } 1888 return 0; 1889 } 1890 1891 static unsigned int rnd_next = 1; 1892 1893 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) 1894 { 1895 uint64_t r; 1896 1897 rnd_next = rnd_next * 1103515245 + 12345; 1898 r = (rnd_next >> 16) & 0x7fff; 1899 return (r * rnd_cnt) >> 15; 1900 } 1901 1902 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, 1903 unsigned long sz) 1904 { 1905 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; 1906 unsigned int ssz; 1907 struct timespec ts1, ts2, ts; 1908 int ret; 1909 1910 ssz = sz >> 9; 1911 1912 rnd_addr = mmc_test_capacity(test->card) / 4; 1913 range1 = rnd_addr / test->card->pref_erase; 1914 range2 = range1 / ssz; 1915 1916 getnstimeofday(&ts1); 1917 for (cnt = 0; cnt < UINT_MAX; cnt++) { 1918 getnstimeofday(&ts2); 1919 ts = timespec_sub(ts2, ts1); 1920 if (ts.tv_sec >= 10) 1921 break; 1922 ea = mmc_test_rnd_num(range1); 1923 if (ea == last_ea) 1924 ea -= 1; 1925 last_ea = ea; 1926 dev_addr = rnd_addr + test->card->pref_erase * ea + 1927 ssz * mmc_test_rnd_num(range2); 1928 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); 1929 if (ret) 1930 return ret; 1931 } 1932 if (print) 1933 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 1934 return 0; 1935 } 1936 1937 static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1938 { 1939 struct mmc_test_area *t = &test->area; 1940 unsigned int next; 1941 unsigned long sz; 1942 int ret; 1943 1944 for (sz = 512; sz < t->max_tfr; sz <<= 1) { 1945 /* 1946 * When writing, try to get more consistent results by running 1947 * the test twice with exactly the same I/O but outputting the 1948 * results only for the 2nd run. 1949 */ 1950 if (write) { 1951 next = rnd_next; 1952 ret = mmc_test_rnd_perf(test, write, 0, sz); 1953 if (ret) 1954 return ret; 1955 rnd_next = next; 1956 } 1957 ret = mmc_test_rnd_perf(test, write, 1, sz); 1958 if (ret) 1959 return ret; 1960 } 1961 sz = t->max_tfr; 1962 if (write) { 1963 next = rnd_next; 1964 ret = mmc_test_rnd_perf(test, write, 0, sz); 1965 if (ret) 1966 return ret; 1967 rnd_next = next; 1968 } 1969 return mmc_test_rnd_perf(test, write, 1, sz); 1970 } 1971 1972 /* 1973 * Random read performance by transfer size. 1974 */ 1975 static int mmc_test_random_read_perf(struct mmc_test_card *test) 1976 { 1977 return mmc_test_random_perf(test, 0); 1978 } 1979 1980 /* 1981 * Random write performance by transfer size. 1982 */ 1983 static int mmc_test_random_write_perf(struct mmc_test_card *test) 1984 { 1985 return mmc_test_random_perf(test, 1); 1986 } 1987 1988 static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1989 unsigned int tot_sz, int max_scatter) 1990 { 1991 struct mmc_test_area *t = &test->area; 1992 unsigned int dev_addr, i, cnt, sz, ssz; 1993 struct timespec ts1, ts2; 1994 int ret; 1995 1996 sz = t->max_tfr; 1997 1998 /* 1999 * In the case of a maximally scattered transfer, the maximum transfer 2000 * size is further limited by using PAGE_SIZE segments. 2001 */ 2002 if (max_scatter) { 2003 unsigned long max_tfr; 2004 2005 if (t->max_seg_sz >= PAGE_SIZE) 2006 max_tfr = t->max_segs * PAGE_SIZE; 2007 else 2008 max_tfr = t->max_segs * t->max_seg_sz; 2009 if (sz > max_tfr) 2010 sz = max_tfr; 2011 } 2012 2013 ssz = sz >> 9; 2014 dev_addr = mmc_test_capacity(test->card) / 4; 2015 if (tot_sz > dev_addr << 9) 2016 tot_sz = dev_addr << 9; 2017 cnt = tot_sz / sz; 2018 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2019 2020 getnstimeofday(&ts1); 2021 for (i = 0; i < cnt; i++) { 2022 ret = mmc_test_area_io(test, sz, dev_addr, write, 2023 max_scatter, 0); 2024 if (ret) 2025 return ret; 2026 dev_addr += ssz; 2027 } 2028 getnstimeofday(&ts2); 2029 2030 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); 2031 2032 return 0; 2033 } 2034 2035 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) 2036 { 2037 int ret, i; 2038 2039 for (i = 0; i < 10; i++) { 2040 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); 2041 if (ret) 2042 return ret; 2043 } 2044 for (i = 0; i < 5; i++) { 2045 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); 2046 if (ret) 2047 return ret; 2048 } 2049 for (i = 0; i < 3; i++) { 2050 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); 2051 if (ret) 2052 return ret; 2053 } 2054 2055 return ret; 2056 } 2057 2058 /* 2059 * Large sequential read performance. 2060 */ 2061 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) 2062 { 2063 return mmc_test_large_seq_perf(test, 0); 2064 } 2065 2066 /* 2067 * Large sequential write performance. 2068 */ 2069 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) 2070 { 2071 return mmc_test_large_seq_perf(test, 1); 2072 } 2073 2074 static int mmc_test_rw_multiple(struct mmc_test_card *test, 2075 struct mmc_test_multiple_rw *tdata, 2076 unsigned int reqsize, unsigned int size, 2077 int min_sg_len) 2078 { 2079 unsigned int dev_addr; 2080 struct mmc_test_area *t = &test->area; 2081 int ret = 0; 2082 2083 /* Set up test area */ 2084 if (size > mmc_test_capacity(test->card) / 2 * 512) 2085 size = mmc_test_capacity(test->card) / 2 * 512; 2086 if (reqsize > t->max_tfr) 2087 reqsize = t->max_tfr; 2088 dev_addr = mmc_test_capacity(test->card) / 4; 2089 if ((dev_addr & 0xffff0000)) 2090 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ 2091 else 2092 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ 2093 if (!dev_addr) 2094 goto err; 2095 2096 if (reqsize > size) 2097 return 0; 2098 2099 /* prepare test area */ 2100 if (mmc_can_erase(test->card) && 2101 tdata->prepare & MMC_TEST_PREP_ERASE) { 2102 ret = mmc_erase(test->card, dev_addr, 2103 size / 512, MMC_SECURE_ERASE_ARG); 2104 if (ret) 2105 ret = mmc_erase(test->card, dev_addr, 2106 size / 512, MMC_ERASE_ARG); 2107 if (ret) 2108 goto err; 2109 } 2110 2111 /* Run test */ 2112 ret = mmc_test_area_io_seq(test, reqsize, dev_addr, 2113 tdata->do_write, 0, 1, size / reqsize, 2114 tdata->do_nonblock_req, min_sg_len); 2115 if (ret) 2116 goto err; 2117 2118 return ret; 2119 err: 2120 pr_info("[%s] error\n", __func__); 2121 return ret; 2122 } 2123 2124 static int mmc_test_rw_multiple_size(struct mmc_test_card *test, 2125 struct mmc_test_multiple_rw *rw) 2126 { 2127 int ret = 0; 2128 int i; 2129 void *pre_req = test->card->host->ops->pre_req; 2130 void *post_req = test->card->host->ops->post_req; 2131 2132 if (rw->do_nonblock_req && 2133 ((!pre_req && post_req) || (pre_req && !post_req))) { 2134 pr_info("error: only one of pre/post is defined\n"); 2135 return -EINVAL; 2136 } 2137 2138 for (i = 0 ; i < rw->len && ret == 0; i++) { 2139 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); 2140 if (ret) 2141 break; 2142 } 2143 return ret; 2144 } 2145 2146 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, 2147 struct mmc_test_multiple_rw *rw) 2148 { 2149 int ret = 0; 2150 int i; 2151 2152 for (i = 0 ; i < rw->len && ret == 0; i++) { 2153 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, 2154 rw->sg_len[i]); 2155 if (ret) 2156 break; 2157 } 2158 return ret; 2159 } 2160 2161 /* 2162 * Multiple blocking write 4k to 4 MB chunks 2163 */ 2164 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) 2165 { 2166 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2167 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2168 struct mmc_test_multiple_rw test_data = { 2169 .bs = bs, 2170 .size = TEST_AREA_MAX_SIZE, 2171 .len = ARRAY_SIZE(bs), 2172 .do_write = true, 2173 .do_nonblock_req = false, 2174 .prepare = MMC_TEST_PREP_ERASE, 2175 }; 2176 2177 return mmc_test_rw_multiple_size(test, &test_data); 2178 }; 2179 2180 /* 2181 * Multiple non-blocking write 4k to 4 MB chunks 2182 */ 2183 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) 2184 { 2185 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2186 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2187 struct mmc_test_multiple_rw test_data = { 2188 .bs = bs, 2189 .size = TEST_AREA_MAX_SIZE, 2190 .len = ARRAY_SIZE(bs), 2191 .do_write = true, 2192 .do_nonblock_req = true, 2193 .prepare = MMC_TEST_PREP_ERASE, 2194 }; 2195 2196 return mmc_test_rw_multiple_size(test, &test_data); 2197 } 2198 2199 /* 2200 * Multiple blocking read 4k to 4 MB chunks 2201 */ 2202 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) 2203 { 2204 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2205 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2206 struct mmc_test_multiple_rw test_data = { 2207 .bs = bs, 2208 .size = TEST_AREA_MAX_SIZE, 2209 .len = ARRAY_SIZE(bs), 2210 .do_write = false, 2211 .do_nonblock_req = false, 2212 .prepare = MMC_TEST_PREP_NONE, 2213 }; 2214 2215 return mmc_test_rw_multiple_size(test, &test_data); 2216 } 2217 2218 /* 2219 * Multiple non-blocking read 4k to 4 MB chunks 2220 */ 2221 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) 2222 { 2223 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 2224 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; 2225 struct mmc_test_multiple_rw test_data = { 2226 .bs = bs, 2227 .size = TEST_AREA_MAX_SIZE, 2228 .len = ARRAY_SIZE(bs), 2229 .do_write = false, 2230 .do_nonblock_req = true, 2231 .prepare = MMC_TEST_PREP_NONE, 2232 }; 2233 2234 return mmc_test_rw_multiple_size(test, &test_data); 2235 } 2236 2237 /* 2238 * Multiple blocking write 1 to 512 sg elements 2239 */ 2240 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) 2241 { 2242 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2243 1 << 7, 1 << 8, 1 << 9}; 2244 struct mmc_test_multiple_rw test_data = { 2245 .sg_len = sg_len, 2246 .size = TEST_AREA_MAX_SIZE, 2247 .len = ARRAY_SIZE(sg_len), 2248 .do_write = true, 2249 .do_nonblock_req = false, 2250 .prepare = MMC_TEST_PREP_ERASE, 2251 }; 2252 2253 return mmc_test_rw_multiple_sg_len(test, &test_data); 2254 }; 2255 2256 /* 2257 * Multiple non-blocking write 1 to 512 sg elements 2258 */ 2259 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) 2260 { 2261 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2262 1 << 7, 1 << 8, 1 << 9}; 2263 struct mmc_test_multiple_rw test_data = { 2264 .sg_len = sg_len, 2265 .size = TEST_AREA_MAX_SIZE, 2266 .len = ARRAY_SIZE(sg_len), 2267 .do_write = true, 2268 .do_nonblock_req = true, 2269 .prepare = MMC_TEST_PREP_ERASE, 2270 }; 2271 2272 return mmc_test_rw_multiple_sg_len(test, &test_data); 2273 } 2274 2275 /* 2276 * Multiple blocking read 1 to 512 sg elements 2277 */ 2278 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) 2279 { 2280 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2281 1 << 7, 1 << 8, 1 << 9}; 2282 struct mmc_test_multiple_rw test_data = { 2283 .sg_len = sg_len, 2284 .size = TEST_AREA_MAX_SIZE, 2285 .len = ARRAY_SIZE(sg_len), 2286 .do_write = false, 2287 .do_nonblock_req = false, 2288 .prepare = MMC_TEST_PREP_NONE, 2289 }; 2290 2291 return mmc_test_rw_multiple_sg_len(test, &test_data); 2292 } 2293 2294 /* 2295 * Multiple non-blocking read 1 to 512 sg elements 2296 */ 2297 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) 2298 { 2299 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 2300 1 << 7, 1 << 8, 1 << 9}; 2301 struct mmc_test_multiple_rw test_data = { 2302 .sg_len = sg_len, 2303 .size = TEST_AREA_MAX_SIZE, 2304 .len = ARRAY_SIZE(sg_len), 2305 .do_write = false, 2306 .do_nonblock_req = true, 2307 .prepare = MMC_TEST_PREP_NONE, 2308 }; 2309 2310 return mmc_test_rw_multiple_sg_len(test, &test_data); 2311 } 2312 2313 /* 2314 * eMMC hardware reset. 2315 */ 2316 static int mmc_test_reset(struct mmc_test_card *test) 2317 { 2318 struct mmc_card *card = test->card; 2319 struct mmc_host *host = card->host; 2320 int err; 2321 2322 err = mmc_hw_reset(host); 2323 if (!err) 2324 return RESULT_OK; 2325 else if (err == -EOPNOTSUPP) 2326 return RESULT_UNSUP_HOST; 2327 2328 return RESULT_FAIL; 2329 } 2330 2331 struct mmc_test_req { 2332 struct mmc_request mrq; 2333 struct mmc_command sbc; 2334 struct mmc_command cmd; 2335 struct mmc_command stop; 2336 struct mmc_command status; 2337 struct mmc_data data; 2338 }; 2339 2340 static struct mmc_test_req *mmc_test_req_alloc(void) 2341 { 2342 struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL); 2343 2344 if (rq) { 2345 rq->mrq.cmd = &rq->cmd; 2346 rq->mrq.data = &rq->data; 2347 rq->mrq.stop = &rq->stop; 2348 } 2349 2350 return rq; 2351 } 2352 2353 static int mmc_test_send_status(struct mmc_test_card *test, 2354 struct mmc_command *cmd) 2355 { 2356 memset(cmd, 0, sizeof(*cmd)); 2357 2358 cmd->opcode = MMC_SEND_STATUS; 2359 if (!mmc_host_is_spi(test->card->host)) 2360 cmd->arg = test->card->rca << 16; 2361 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 2362 2363 return mmc_wait_for_cmd(test->card->host, cmd, 0); 2364 } 2365 2366 static int mmc_test_ongoing_transfer(struct mmc_test_card *test, 2367 unsigned int dev_addr, int use_sbc, 2368 int repeat_cmd, int write, int use_areq) 2369 { 2370 struct mmc_test_req *rq = mmc_test_req_alloc(); 2371 struct mmc_host *host = test->card->host; 2372 struct mmc_test_area *t = &test->area; 2373 struct mmc_test_async_req test_areq = { .test = test }; 2374 struct mmc_request *mrq; 2375 unsigned long timeout; 2376 bool expired = false; 2377 enum mmc_blk_status blkstat = MMC_BLK_SUCCESS; 2378 int ret = 0, cmd_ret; 2379 u32 status = 0; 2380 int count = 0; 2381 2382 if (!rq) 2383 return -ENOMEM; 2384 2385 mrq = &rq->mrq; 2386 if (use_sbc) 2387 mrq->sbc = &rq->sbc; 2388 mrq->cap_cmd_during_tfr = true; 2389 2390 test_areq.areq.mrq = mrq; 2391 test_areq.areq.err_check = mmc_test_check_result_async; 2392 2393 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, 2394 512, write); 2395 2396 if (use_sbc && t->blocks > 1 && !mrq->sbc) { 2397 ret = mmc_host_cmd23(host) ? 2398 RESULT_UNSUP_CARD : 2399 RESULT_UNSUP_HOST; 2400 goto out_free; 2401 } 2402 2403 /* Start ongoing data request */ 2404 if (use_areq) { 2405 mmc_start_areq(host, &test_areq.areq, &blkstat); 2406 if (blkstat != MMC_BLK_SUCCESS) { 2407 ret = RESULT_FAIL; 2408 goto out_free; 2409 } 2410 } else { 2411 mmc_wait_for_req(host, mrq); 2412 } 2413 2414 timeout = jiffies + msecs_to_jiffies(3000); 2415 do { 2416 count += 1; 2417 2418 /* Send status command while data transfer in progress */ 2419 cmd_ret = mmc_test_send_status(test, &rq->status); 2420 if (cmd_ret) 2421 break; 2422 2423 status = rq->status.resp[0]; 2424 if (status & R1_ERROR) { 2425 cmd_ret = -EIO; 2426 break; 2427 } 2428 2429 if (mmc_is_req_done(host, mrq)) 2430 break; 2431 2432 expired = time_after(jiffies, timeout); 2433 if (expired) { 2434 pr_info("%s: timeout waiting for Tran state status %#x\n", 2435 mmc_hostname(host), status); 2436 cmd_ret = -ETIMEDOUT; 2437 break; 2438 } 2439 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN); 2440 2441 /* Wait for data request to complete */ 2442 if (use_areq) { 2443 mmc_start_areq(host, NULL, &blkstat); 2444 if (blkstat != MMC_BLK_SUCCESS) 2445 ret = RESULT_FAIL; 2446 } else { 2447 mmc_wait_for_req_done(test->card->host, mrq); 2448 } 2449 2450 /* 2451 * For cap_cmd_during_tfr request, upper layer must send stop if 2452 * required. 2453 */ 2454 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { 2455 if (ret) 2456 mmc_wait_for_cmd(host, mrq->data->stop, 0); 2457 else 2458 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); 2459 } 2460 2461 if (ret) 2462 goto out_free; 2463 2464 if (cmd_ret) { 2465 pr_info("%s: Send Status failed: status %#x, error %d\n", 2466 mmc_hostname(test->card->host), status, cmd_ret); 2467 } 2468 2469 ret = mmc_test_check_result(test, mrq); 2470 if (ret) 2471 goto out_free; 2472 2473 ret = mmc_test_wait_busy(test); 2474 if (ret) 2475 goto out_free; 2476 2477 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) 2478 pr_info("%s: %d commands completed during transfer of %u blocks\n", 2479 mmc_hostname(test->card->host), count, t->blocks); 2480 2481 if (cmd_ret) 2482 ret = cmd_ret; 2483 out_free: 2484 kfree(rq); 2485 2486 return ret; 2487 } 2488 2489 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test, 2490 unsigned long sz, int use_sbc, int write, 2491 int use_areq) 2492 { 2493 struct mmc_test_area *t = &test->area; 2494 int ret; 2495 2496 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) 2497 return RESULT_UNSUP_HOST; 2498 2499 ret = mmc_test_area_map(test, sz, 0, 0); 2500 if (ret) 2501 return ret; 2502 2503 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, 2504 use_areq); 2505 if (ret) 2506 return ret; 2507 2508 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, 2509 use_areq); 2510 } 2511 2512 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc, 2513 int write, int use_areq) 2514 { 2515 struct mmc_test_area *t = &test->area; 2516 unsigned long sz; 2517 int ret; 2518 2519 for (sz = 512; sz <= t->max_tfr; sz += 512) { 2520 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write, 2521 use_areq); 2522 if (ret) 2523 return ret; 2524 } 2525 return 0; 2526 } 2527 2528 /* 2529 * Commands during read - no Set Block Count (CMD23). 2530 */ 2531 static int mmc_test_cmds_during_read(struct mmc_test_card *test) 2532 { 2533 return mmc_test_cmds_during_tfr(test, 0, 0, 0); 2534 } 2535 2536 /* 2537 * Commands during write - no Set Block Count (CMD23). 2538 */ 2539 static int mmc_test_cmds_during_write(struct mmc_test_card *test) 2540 { 2541 return mmc_test_cmds_during_tfr(test, 0, 1, 0); 2542 } 2543 2544 /* 2545 * Commands during read - use Set Block Count (CMD23). 2546 */ 2547 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test) 2548 { 2549 return mmc_test_cmds_during_tfr(test, 1, 0, 0); 2550 } 2551 2552 /* 2553 * Commands during write - use Set Block Count (CMD23). 2554 */ 2555 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test) 2556 { 2557 return mmc_test_cmds_during_tfr(test, 1, 1, 0); 2558 } 2559 2560 /* 2561 * Commands during non-blocking read - use Set Block Count (CMD23). 2562 */ 2563 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test) 2564 { 2565 return mmc_test_cmds_during_tfr(test, 1, 0, 1); 2566 } 2567 2568 /* 2569 * Commands during non-blocking write - use Set Block Count (CMD23). 2570 */ 2571 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test) 2572 { 2573 return mmc_test_cmds_during_tfr(test, 1, 1, 1); 2574 } 2575 2576 static const struct mmc_test_case mmc_test_cases[] = { 2577 { 2578 .name = "Basic write (no data verification)", 2579 .run = mmc_test_basic_write, 2580 }, 2581 2582 { 2583 .name = "Basic read (no data verification)", 2584 .run = mmc_test_basic_read, 2585 }, 2586 2587 { 2588 .name = "Basic write (with data verification)", 2589 .prepare = mmc_test_prepare_write, 2590 .run = mmc_test_verify_write, 2591 .cleanup = mmc_test_cleanup, 2592 }, 2593 2594 { 2595 .name = "Basic read (with data verification)", 2596 .prepare = mmc_test_prepare_read, 2597 .run = mmc_test_verify_read, 2598 .cleanup = mmc_test_cleanup, 2599 }, 2600 2601 { 2602 .name = "Multi-block write", 2603 .prepare = mmc_test_prepare_write, 2604 .run = mmc_test_multi_write, 2605 .cleanup = mmc_test_cleanup, 2606 }, 2607 2608 { 2609 .name = "Multi-block read", 2610 .prepare = mmc_test_prepare_read, 2611 .run = mmc_test_multi_read, 2612 .cleanup = mmc_test_cleanup, 2613 }, 2614 2615 { 2616 .name = "Power of two block writes", 2617 .prepare = mmc_test_prepare_write, 2618 .run = mmc_test_pow2_write, 2619 .cleanup = mmc_test_cleanup, 2620 }, 2621 2622 { 2623 .name = "Power of two block reads", 2624 .prepare = mmc_test_prepare_read, 2625 .run = mmc_test_pow2_read, 2626 .cleanup = mmc_test_cleanup, 2627 }, 2628 2629 { 2630 .name = "Weird sized block writes", 2631 .prepare = mmc_test_prepare_write, 2632 .run = mmc_test_weird_write, 2633 .cleanup = mmc_test_cleanup, 2634 }, 2635 2636 { 2637 .name = "Weird sized block reads", 2638 .prepare = mmc_test_prepare_read, 2639 .run = mmc_test_weird_read, 2640 .cleanup = mmc_test_cleanup, 2641 }, 2642 2643 { 2644 .name = "Badly aligned write", 2645 .prepare = mmc_test_prepare_write, 2646 .run = mmc_test_align_write, 2647 .cleanup = mmc_test_cleanup, 2648 }, 2649 2650 { 2651 .name = "Badly aligned read", 2652 .prepare = mmc_test_prepare_read, 2653 .run = mmc_test_align_read, 2654 .cleanup = mmc_test_cleanup, 2655 }, 2656 2657 { 2658 .name = "Badly aligned multi-block write", 2659 .prepare = mmc_test_prepare_write, 2660 .run = mmc_test_align_multi_write, 2661 .cleanup = mmc_test_cleanup, 2662 }, 2663 2664 { 2665 .name = "Badly aligned multi-block read", 2666 .prepare = mmc_test_prepare_read, 2667 .run = mmc_test_align_multi_read, 2668 .cleanup = mmc_test_cleanup, 2669 }, 2670 2671 { 2672 .name = "Correct xfer_size at write (start failure)", 2673 .run = mmc_test_xfersize_write, 2674 }, 2675 2676 { 2677 .name = "Correct xfer_size at read (start failure)", 2678 .run = mmc_test_xfersize_read, 2679 }, 2680 2681 { 2682 .name = "Correct xfer_size at write (midway failure)", 2683 .run = mmc_test_multi_xfersize_write, 2684 }, 2685 2686 { 2687 .name = "Correct xfer_size at read (midway failure)", 2688 .run = mmc_test_multi_xfersize_read, 2689 }, 2690 2691 #ifdef CONFIG_HIGHMEM 2692 2693 { 2694 .name = "Highmem write", 2695 .prepare = mmc_test_prepare_write, 2696 .run = mmc_test_write_high, 2697 .cleanup = mmc_test_cleanup, 2698 }, 2699 2700 { 2701 .name = "Highmem read", 2702 .prepare = mmc_test_prepare_read, 2703 .run = mmc_test_read_high, 2704 .cleanup = mmc_test_cleanup, 2705 }, 2706 2707 { 2708 .name = "Multi-block highmem write", 2709 .prepare = mmc_test_prepare_write, 2710 .run = mmc_test_multi_write_high, 2711 .cleanup = mmc_test_cleanup, 2712 }, 2713 2714 { 2715 .name = "Multi-block highmem read", 2716 .prepare = mmc_test_prepare_read, 2717 .run = mmc_test_multi_read_high, 2718 .cleanup = mmc_test_cleanup, 2719 }, 2720 2721 #else 2722 2723 { 2724 .name = "Highmem write", 2725 .run = mmc_test_no_highmem, 2726 }, 2727 2728 { 2729 .name = "Highmem read", 2730 .run = mmc_test_no_highmem, 2731 }, 2732 2733 { 2734 .name = "Multi-block highmem write", 2735 .run = mmc_test_no_highmem, 2736 }, 2737 2738 { 2739 .name = "Multi-block highmem read", 2740 .run = mmc_test_no_highmem, 2741 }, 2742 2743 #endif /* CONFIG_HIGHMEM */ 2744 2745 { 2746 .name = "Best-case read performance", 2747 .prepare = mmc_test_area_prepare_fill, 2748 .run = mmc_test_best_read_performance, 2749 .cleanup = mmc_test_area_cleanup, 2750 }, 2751 2752 { 2753 .name = "Best-case write performance", 2754 .prepare = mmc_test_area_prepare_erase, 2755 .run = mmc_test_best_write_performance, 2756 .cleanup = mmc_test_area_cleanup, 2757 }, 2758 2759 { 2760 .name = "Best-case read performance into scattered pages", 2761 .prepare = mmc_test_area_prepare_fill, 2762 .run = mmc_test_best_read_perf_max_scatter, 2763 .cleanup = mmc_test_area_cleanup, 2764 }, 2765 2766 { 2767 .name = "Best-case write performance from scattered pages", 2768 .prepare = mmc_test_area_prepare_erase, 2769 .run = mmc_test_best_write_perf_max_scatter, 2770 .cleanup = mmc_test_area_cleanup, 2771 }, 2772 2773 { 2774 .name = "Single read performance by transfer size", 2775 .prepare = mmc_test_area_prepare_fill, 2776 .run = mmc_test_profile_read_perf, 2777 .cleanup = mmc_test_area_cleanup, 2778 }, 2779 2780 { 2781 .name = "Single write performance by transfer size", 2782 .prepare = mmc_test_area_prepare, 2783 .run = mmc_test_profile_write_perf, 2784 .cleanup = mmc_test_area_cleanup, 2785 }, 2786 2787 { 2788 .name = "Single trim performance by transfer size", 2789 .prepare = mmc_test_area_prepare_fill, 2790 .run = mmc_test_profile_trim_perf, 2791 .cleanup = mmc_test_area_cleanup, 2792 }, 2793 2794 { 2795 .name = "Consecutive read performance by transfer size", 2796 .prepare = mmc_test_area_prepare_fill, 2797 .run = mmc_test_profile_seq_read_perf, 2798 .cleanup = mmc_test_area_cleanup, 2799 }, 2800 2801 { 2802 .name = "Consecutive write performance by transfer size", 2803 .prepare = mmc_test_area_prepare, 2804 .run = mmc_test_profile_seq_write_perf, 2805 .cleanup = mmc_test_area_cleanup, 2806 }, 2807 2808 { 2809 .name = "Consecutive trim performance by transfer size", 2810 .prepare = mmc_test_area_prepare, 2811 .run = mmc_test_profile_seq_trim_perf, 2812 .cleanup = mmc_test_area_cleanup, 2813 }, 2814 2815 { 2816 .name = "Random read performance by transfer size", 2817 .prepare = mmc_test_area_prepare, 2818 .run = mmc_test_random_read_perf, 2819 .cleanup = mmc_test_area_cleanup, 2820 }, 2821 2822 { 2823 .name = "Random write performance by transfer size", 2824 .prepare = mmc_test_area_prepare, 2825 .run = mmc_test_random_write_perf, 2826 .cleanup = mmc_test_area_cleanup, 2827 }, 2828 2829 { 2830 .name = "Large sequential read into scattered pages", 2831 .prepare = mmc_test_area_prepare, 2832 .run = mmc_test_large_seq_read_perf, 2833 .cleanup = mmc_test_area_cleanup, 2834 }, 2835 2836 { 2837 .name = "Large sequential write from scattered pages", 2838 .prepare = mmc_test_area_prepare, 2839 .run = mmc_test_large_seq_write_perf, 2840 .cleanup = mmc_test_area_cleanup, 2841 }, 2842 2843 { 2844 .name = "Write performance with blocking req 4k to 4MB", 2845 .prepare = mmc_test_area_prepare, 2846 .run = mmc_test_profile_mult_write_blocking_perf, 2847 .cleanup = mmc_test_area_cleanup, 2848 }, 2849 2850 { 2851 .name = "Write performance with non-blocking req 4k to 4MB", 2852 .prepare = mmc_test_area_prepare, 2853 .run = mmc_test_profile_mult_write_nonblock_perf, 2854 .cleanup = mmc_test_area_cleanup, 2855 }, 2856 2857 { 2858 .name = "Read performance with blocking req 4k to 4MB", 2859 .prepare = mmc_test_area_prepare, 2860 .run = mmc_test_profile_mult_read_blocking_perf, 2861 .cleanup = mmc_test_area_cleanup, 2862 }, 2863 2864 { 2865 .name = "Read performance with non-blocking req 4k to 4MB", 2866 .prepare = mmc_test_area_prepare, 2867 .run = mmc_test_profile_mult_read_nonblock_perf, 2868 .cleanup = mmc_test_area_cleanup, 2869 }, 2870 2871 { 2872 .name = "Write performance blocking req 1 to 512 sg elems", 2873 .prepare = mmc_test_area_prepare, 2874 .run = mmc_test_profile_sglen_wr_blocking_perf, 2875 .cleanup = mmc_test_area_cleanup, 2876 }, 2877 2878 { 2879 .name = "Write performance non-blocking req 1 to 512 sg elems", 2880 .prepare = mmc_test_area_prepare, 2881 .run = mmc_test_profile_sglen_wr_nonblock_perf, 2882 .cleanup = mmc_test_area_cleanup, 2883 }, 2884 2885 { 2886 .name = "Read performance blocking req 1 to 512 sg elems", 2887 .prepare = mmc_test_area_prepare, 2888 .run = mmc_test_profile_sglen_r_blocking_perf, 2889 .cleanup = mmc_test_area_cleanup, 2890 }, 2891 2892 { 2893 .name = "Read performance non-blocking req 1 to 512 sg elems", 2894 .prepare = mmc_test_area_prepare, 2895 .run = mmc_test_profile_sglen_r_nonblock_perf, 2896 .cleanup = mmc_test_area_cleanup, 2897 }, 2898 2899 { 2900 .name = "Reset test", 2901 .run = mmc_test_reset, 2902 }, 2903 2904 { 2905 .name = "Commands during read - no Set Block Count (CMD23)", 2906 .prepare = mmc_test_area_prepare, 2907 .run = mmc_test_cmds_during_read, 2908 .cleanup = mmc_test_area_cleanup, 2909 }, 2910 2911 { 2912 .name = "Commands during write - no Set Block Count (CMD23)", 2913 .prepare = mmc_test_area_prepare, 2914 .run = mmc_test_cmds_during_write, 2915 .cleanup = mmc_test_area_cleanup, 2916 }, 2917 2918 { 2919 .name = "Commands during read - use Set Block Count (CMD23)", 2920 .prepare = mmc_test_area_prepare, 2921 .run = mmc_test_cmds_during_read_cmd23, 2922 .cleanup = mmc_test_area_cleanup, 2923 }, 2924 2925 { 2926 .name = "Commands during write - use Set Block Count (CMD23)", 2927 .prepare = mmc_test_area_prepare, 2928 .run = mmc_test_cmds_during_write_cmd23, 2929 .cleanup = mmc_test_area_cleanup, 2930 }, 2931 2932 { 2933 .name = "Commands during non-blocking read - use Set Block Count (CMD23)", 2934 .prepare = mmc_test_area_prepare, 2935 .run = mmc_test_cmds_during_read_cmd23_nonblock, 2936 .cleanup = mmc_test_area_cleanup, 2937 }, 2938 2939 { 2940 .name = "Commands during non-blocking write - use Set Block Count (CMD23)", 2941 .prepare = mmc_test_area_prepare, 2942 .run = mmc_test_cmds_during_write_cmd23_nonblock, 2943 .cleanup = mmc_test_area_cleanup, 2944 }, 2945 }; 2946 2947 static DEFINE_MUTEX(mmc_test_lock); 2948 2949 static LIST_HEAD(mmc_test_result); 2950 2951 static void mmc_test_run(struct mmc_test_card *test, int testcase) 2952 { 2953 int i, ret; 2954 2955 pr_info("%s: Starting tests of card %s...\n", 2956 mmc_hostname(test->card->host), mmc_card_id(test->card)); 2957 2958 mmc_claim_host(test->card->host); 2959 2960 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) { 2961 struct mmc_test_general_result *gr; 2962 2963 if (testcase && ((i + 1) != testcase)) 2964 continue; 2965 2966 pr_info("%s: Test case %d. %s...\n", 2967 mmc_hostname(test->card->host), i + 1, 2968 mmc_test_cases[i].name); 2969 2970 if (mmc_test_cases[i].prepare) { 2971 ret = mmc_test_cases[i].prepare(test); 2972 if (ret) { 2973 pr_info("%s: Result: Prepare stage failed! (%d)\n", 2974 mmc_hostname(test->card->host), 2975 ret); 2976 continue; 2977 } 2978 } 2979 2980 gr = kzalloc(sizeof(*gr), GFP_KERNEL); 2981 if (gr) { 2982 INIT_LIST_HEAD(&gr->tr_lst); 2983 2984 /* Assign data what we know already */ 2985 gr->card = test->card; 2986 gr->testcase = i; 2987 2988 /* Append container to global one */ 2989 list_add_tail(&gr->link, &mmc_test_result); 2990 2991 /* 2992 * Save the pointer to created container in our private 2993 * structure. 2994 */ 2995 test->gr = gr; 2996 } 2997 2998 ret = mmc_test_cases[i].run(test); 2999 switch (ret) { 3000 case RESULT_OK: 3001 pr_info("%s: Result: OK\n", 3002 mmc_hostname(test->card->host)); 3003 break; 3004 case RESULT_FAIL: 3005 pr_info("%s: Result: FAILED\n", 3006 mmc_hostname(test->card->host)); 3007 break; 3008 case RESULT_UNSUP_HOST: 3009 pr_info("%s: Result: UNSUPPORTED (by host)\n", 3010 mmc_hostname(test->card->host)); 3011 break; 3012 case RESULT_UNSUP_CARD: 3013 pr_info("%s: Result: UNSUPPORTED (by card)\n", 3014 mmc_hostname(test->card->host)); 3015 break; 3016 default: 3017 pr_info("%s: Result: ERROR (%d)\n", 3018 mmc_hostname(test->card->host), ret); 3019 } 3020 3021 /* Save the result */ 3022 if (gr) 3023 gr->result = ret; 3024 3025 if (mmc_test_cases[i].cleanup) { 3026 ret = mmc_test_cases[i].cleanup(test); 3027 if (ret) { 3028 pr_info("%s: Warning: Cleanup stage failed! (%d)\n", 3029 mmc_hostname(test->card->host), 3030 ret); 3031 } 3032 } 3033 } 3034 3035 mmc_release_host(test->card->host); 3036 3037 pr_info("%s: Tests completed.\n", 3038 mmc_hostname(test->card->host)); 3039 } 3040 3041 static void mmc_test_free_result(struct mmc_card *card) 3042 { 3043 struct mmc_test_general_result *gr, *grs; 3044 3045 mutex_lock(&mmc_test_lock); 3046 3047 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { 3048 struct mmc_test_transfer_result *tr, *trs; 3049 3050 if (card && gr->card != card) 3051 continue; 3052 3053 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { 3054 list_del(&tr->link); 3055 kfree(tr); 3056 } 3057 3058 list_del(&gr->link); 3059 kfree(gr); 3060 } 3061 3062 mutex_unlock(&mmc_test_lock); 3063 } 3064 3065 static LIST_HEAD(mmc_test_file_test); 3066 3067 static int mtf_test_show(struct seq_file *sf, void *data) 3068 { 3069 struct mmc_card *card = (struct mmc_card *)sf->private; 3070 struct mmc_test_general_result *gr; 3071 3072 mutex_lock(&mmc_test_lock); 3073 3074 list_for_each_entry(gr, &mmc_test_result, link) { 3075 struct mmc_test_transfer_result *tr; 3076 3077 if (gr->card != card) 3078 continue; 3079 3080 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3081 3082 list_for_each_entry(tr, &gr->tr_lst, link) { 3083 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n", 3084 tr->count, tr->sectors, 3085 (unsigned long)tr->ts.tv_sec, 3086 (unsigned long)tr->ts.tv_nsec, 3087 tr->rate, tr->iops / 100, tr->iops % 100); 3088 } 3089 } 3090 3091 mutex_unlock(&mmc_test_lock); 3092 3093 return 0; 3094 } 3095 3096 static int mtf_test_open(struct inode *inode, struct file *file) 3097 { 3098 return single_open(file, mtf_test_show, inode->i_private); 3099 } 3100 3101 static ssize_t mtf_test_write(struct file *file, const char __user *buf, 3102 size_t count, loff_t *pos) 3103 { 3104 struct seq_file *sf = (struct seq_file *)file->private_data; 3105 struct mmc_card *card = (struct mmc_card *)sf->private; 3106 struct mmc_test_card *test; 3107 long testcase; 3108 int ret; 3109 3110 ret = kstrtol_from_user(buf, count, 10, &testcase); 3111 if (ret) 3112 return ret; 3113 3114 test = kzalloc(sizeof(*test), GFP_KERNEL); 3115 if (!test) 3116 return -ENOMEM; 3117 3118 /* 3119 * Remove all test cases associated with given card. Thus we have only 3120 * actual data of the last run. 3121 */ 3122 mmc_test_free_result(card); 3123 3124 test->card = card; 3125 3126 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 3127 #ifdef CONFIG_HIGHMEM 3128 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); 3129 #endif 3130 3131 #ifdef CONFIG_HIGHMEM 3132 if (test->buffer && test->highmem) { 3133 #else 3134 if (test->buffer) { 3135 #endif 3136 mutex_lock(&mmc_test_lock); 3137 mmc_test_run(test, testcase); 3138 mutex_unlock(&mmc_test_lock); 3139 } 3140 3141 #ifdef CONFIG_HIGHMEM 3142 __free_pages(test->highmem, BUFFER_ORDER); 3143 #endif 3144 kfree(test->buffer); 3145 kfree(test); 3146 3147 return count; 3148 } 3149 3150 static const struct file_operations mmc_test_fops_test = { 3151 .open = mtf_test_open, 3152 .read = seq_read, 3153 .write = mtf_test_write, 3154 .llseek = seq_lseek, 3155 .release = single_release, 3156 }; 3157 3158 static int mtf_testlist_show(struct seq_file *sf, void *data) 3159 { 3160 int i; 3161 3162 mutex_lock(&mmc_test_lock); 3163 3164 seq_puts(sf, "0:\tRun all tests\n"); 3165 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) 3166 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name); 3167 3168 mutex_unlock(&mmc_test_lock); 3169 3170 return 0; 3171 } 3172 3173 static int mtf_testlist_open(struct inode *inode, struct file *file) 3174 { 3175 return single_open(file, mtf_testlist_show, inode->i_private); 3176 } 3177 3178 static const struct file_operations mmc_test_fops_testlist = { 3179 .open = mtf_testlist_open, 3180 .read = seq_read, 3181 .llseek = seq_lseek, 3182 .release = single_release, 3183 }; 3184 3185 static void mmc_test_free_dbgfs_file(struct mmc_card *card) 3186 { 3187 struct mmc_test_dbgfs_file *df, *dfs; 3188 3189 mutex_lock(&mmc_test_lock); 3190 3191 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { 3192 if (card && df->card != card) 3193 continue; 3194 debugfs_remove(df->file); 3195 list_del(&df->link); 3196 kfree(df); 3197 } 3198 3199 mutex_unlock(&mmc_test_lock); 3200 } 3201 3202 static int __mmc_test_register_dbgfs_file(struct mmc_card *card, 3203 const char *name, umode_t mode, const struct file_operations *fops) 3204 { 3205 struct dentry *file = NULL; 3206 struct mmc_test_dbgfs_file *df; 3207 3208 if (card->debugfs_root) 3209 file = debugfs_create_file(name, mode, card->debugfs_root, 3210 card, fops); 3211 3212 if (IS_ERR_OR_NULL(file)) { 3213 dev_err(&card->dev, 3214 "Can't create %s. Perhaps debugfs is disabled.\n", 3215 name); 3216 return -ENODEV; 3217 } 3218 3219 df = kmalloc(sizeof(*df), GFP_KERNEL); 3220 if (!df) { 3221 debugfs_remove(file); 3222 dev_err(&card->dev, 3223 "Can't allocate memory for internal usage.\n"); 3224 return -ENOMEM; 3225 } 3226 3227 df->card = card; 3228 df->file = file; 3229 3230 list_add(&df->link, &mmc_test_file_test); 3231 return 0; 3232 } 3233 3234 static int mmc_test_register_dbgfs_file(struct mmc_card *card) 3235 { 3236 int ret; 3237 3238 mutex_lock(&mmc_test_lock); 3239 3240 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, 3241 &mmc_test_fops_test); 3242 if (ret) 3243 goto err; 3244 3245 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, 3246 &mmc_test_fops_testlist); 3247 if (ret) 3248 goto err; 3249 3250 err: 3251 mutex_unlock(&mmc_test_lock); 3252 3253 return ret; 3254 } 3255 3256 static int mmc_test_probe(struct mmc_card *card) 3257 { 3258 int ret; 3259 3260 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 3261 return -ENODEV; 3262 3263 ret = mmc_test_register_dbgfs_file(card); 3264 if (ret) 3265 return ret; 3266 3267 dev_info(&card->dev, "Card claimed for testing.\n"); 3268 3269 return 0; 3270 } 3271 3272 static void mmc_test_remove(struct mmc_card *card) 3273 { 3274 mmc_test_free_result(card); 3275 mmc_test_free_dbgfs_file(card); 3276 } 3277 3278 static void mmc_test_shutdown(struct mmc_card *card) 3279 { 3280 } 3281 3282 static struct mmc_driver mmc_driver = { 3283 .drv = { 3284 .name = "mmc_test", 3285 }, 3286 .probe = mmc_test_probe, 3287 .remove = mmc_test_remove, 3288 .shutdown = mmc_test_shutdown, 3289 }; 3290 3291 static int __init mmc_test_init(void) 3292 { 3293 return mmc_register_driver(&mmc_driver); 3294 } 3295 3296 static void __exit mmc_test_exit(void) 3297 { 3298 /* Clear stalled data if card is still plugged */ 3299 mmc_test_free_result(NULL); 3300 mmc_test_free_dbgfs_file(NULL); 3301 3302 mmc_unregister_driver(&mmc_driver); 3303 } 3304 3305 module_init(mmc_test_init); 3306 module_exit(mmc_test_exit); 3307 3308 MODULE_LICENSE("GPL"); 3309 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); 3310 MODULE_AUTHOR("Pierre Ossman"); 3311