/*
 * linux/drivers/mmc/card/mmc_test.c
 *
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors checked
 * @sectors: number of sectors checked in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};
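
/*
 * Added kernel-doc (derived from how the fields are used by the
 * mmc_test_rw_multiple*() tests below) to match the documentation style
 * of the other structures in this file.
 */
/**
 * struct mmc_test_multiple_rw - parameters for the multiple read/write tests.
 * @sg_len: scatterlist lengths to iterate over, if any
 * @bs: request sizes (in bytes) to iterate over, if any
 * @len: number of entries in @sg_len or @bs
 * @size: total number of bytes to transfer for each tested request size
 * @do_write: true for write tests, false for read tests
 * @do_nonblock_req: use the non-blocking (asynchronous) request API
 * @prepare: how to prepare the test area before running
 */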
struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
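
/*
 * Check whether the card is busy: not ready for data, or still in the
 * programming state, judging from the R1 status returned by SEND_STATUS.
 */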
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.  If min_sg_len is non-zero, segment lengths are
 * capped so that at least that many scatterlist entries are used.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
		"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
		"%u.%02u IOPS, sg_len %d)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100,
		test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
			mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
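
/*
 * err_check callback for asynchronous requests: wait for the card to
 * leave the busy state, then check the transfer result as usual.
 */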
static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
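
/*
 * Reset the request, command and data structures so they can be reused
 * for the next round of a non-blocking transfer test.
 */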
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}

/*
 * Tests nonblock transfer with certain parameters
 */
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len,
				     dev_addr, blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
err:
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
				       blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;	/* read one extra sector of filler */

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/
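
/*
 * Added kernel-doc, matching the style of the structures documented at
 * the top of this file.
 */
/**
 * struct mmc_test_case - description of a single test case.
 * @name: human-readable test name
 * @prepare: optional hook run before @run to set up the test
 * @run: the test itself
 * @cleanup: optional hook run after @run to release resources
 */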
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
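
/*
 * The xfer_size tests below start transfers that are expected to fail and
 * check, via mmc_test_check_broken_result(), that the host driver reports
 * a sane number of transferred bytes.
 */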
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
				dev_addr, t->blocks, 512, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}
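
/*
 * Map and transfer a single set of bytes, optionally timed.
 */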
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to
 * the middle of the card because cards may have different characteristics
 * at the front (for FAT file system optimization).  Optionally, the area is
 * erased (if the card supports it) which may improve write performance.
 * Optionally, the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
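
/*
 * Read the whole test area sequentially in requests of sz bytes and
 * report the average rate.
 */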
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
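
/*
 * Simple linear congruential pseudo-random number generator, used to pick
 * addresses for the random I/O tests.  mmc_test_rnd_num() returns a value
 * in the range [0, rnd_cnt).
 */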
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}

static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
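
/*
 * Transfer tot_sz bytes sequentially in maximum-sized requests and report
 * the average rate, optionally using maximally scattered scatterlists.
 */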
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}

static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (dev_addr & 0xffff0000)
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* Prepare the test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
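
/*
 * Run the multiple-request test once for each request size in rw->bs.
 * Non-blocking tests require the host driver to implement both the
 * pre_req and post_req callbacks.
 */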
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * eMMC hardware reset.
 */
static int mmc_test_hw_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset_check(host);
	if (!err)
		return RESULT_OK;

	/* -ENOSYS from the reset check counts as a test failure */
	if (err == -ENOSYS)
		return RESULT_FAIL;

	/* Pass through errors other than "operation not supported" */
	if (err != -EOPNOTSUPP)
		return err;

	/* Attribute the missing support to the card or to the host */
	if (!mmc_can_reset(card))
		return RESULT_UNSUP_CARD;

	return RESULT_UNSUP_HOST;
}
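
/*
 * The table below drives mmc_test_run(): for each selected entry the
 * optional ->prepare() hook runs first, then ->run() returns one of the
 * RESULT_* codes (or a negative errno), and the optional ->cleanup()
 * hook runs last regardless of the run result.  A minimal new entry,
 * sketched here purely as an editor's illustration (the .run function
 * name is hypothetical), would look like:
 *
 *	{
 *		.name = "My new test",
 *		.prepare = mmc_test_area_prepare,
 *		.run = mmc_test_my_new_test,
 *		.cleanup = mmc_test_area_cleanup,
 *	},
 */
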
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "eMMC hardware reset",
		.run = mmc_test_hw_reset,
	},
};
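
/*
 * Test numbering, as reported by the "testlist" debugfs file and accepted
 * by the "test" file, is the array index plus one; writing 0 selects every
 * test case (see the testcase filter in mmc_test_run() below).
 */
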
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			     GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Record the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Add the result container to the global list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save a pointer to the new container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
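
/*
 * For reference, a run of a single test produces kernel log lines of the
 * following shape (editor's example, assuming a host named "mmc0"; the
 * exact card id string depends on the card):
 *
 *	mmc0: Starting tests of card mmc0:0001...
 *	mmc0: Test case 1. Basic write (no data verification)...
 *	mmc0: Result: OK
 *	mmc0: Tests completed.
 */
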
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %u %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card so that only
	 * data from the most recent run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	/* alloc_pages() may have failed; __free_pages() is not NULL-safe */
	if (test->highmem)
		__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
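
/*
 * Reading the "test" debugfs file back through mtf_test_show() yields one
 * "Test <n>: <result>" line per stored test case, each followed by any
 * transfer measurements in the form produced above, i.e. (editor's
 * annotation):
 *
 *	<count> <sectors> <seconds>.<nanoseconds> <rate> <iops>
 *
 * with iops printed to two decimal places (it is stored multiplied
 * by 100).
 */
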
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}

static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
			card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}
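
/*
 * Typical use of the two files registered below, assuming debugfs is
 * mounted at /sys/kernel/debug and the card's directory is mmc0/mmc0:0001
 * (editor's example; the actual path depends on the host and card):
 *
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/testlist	# list test cases
 *	echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test	# run test case 4
 *	echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test	# run all test cases
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/test	# read the results
 */
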
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}

static struct mmc_driver mmc_driver = {
	.drv = {
		.name = "mmc_test",
	},
	.probe = mmc_test_probe,
	.remove = mmc_test_remove,
	.shutdown = mmc_test_shutdown,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stale data if the card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");