1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block driver for media (i.e., flash cards) 4 * 5 * Copyright 2002 Hewlett-Packard Company 6 * Copyright 2005-2008 Pierre Ossman 7 * 8 * Use consistent with the GNU GPL is permitted, 9 * provided that this copyright notice is 10 * preserved in its entirety in all copies and derived works. 11 * 12 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, 13 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS 14 * FITNESS FOR ANY PARTICULAR PURPOSE. 15 * 16 * Many thanks to Alessandro Rubini and Jonathan Corbet! 17 * 18 * Author: Andrew Christian 19 * 28 May 2002 20 */ 21 #include <linux/moduleparam.h> 22 #include <linux/module.h> 23 #include <linux/init.h> 24 25 #include <linux/kernel.h> 26 #include <linux/fs.h> 27 #include <linux/slab.h> 28 #include <linux/errno.h> 29 #include <linux/hdreg.h> 30 #include <linux/kdev_t.h> 31 #include <linux/kref.h> 32 #include <linux/blkdev.h> 33 #include <linux/cdev.h> 34 #include <linux/mutex.h> 35 #include <linux/scatterlist.h> 36 #include <linux/string_helpers.h> 37 #include <linux/delay.h> 38 #include <linux/capability.h> 39 #include <linux/compat.h> 40 #include <linux/pm_runtime.h> 41 #include <linux/idr.h> 42 #include <linux/debugfs.h> 43 44 #include <linux/mmc/ioctl.h> 45 #include <linux/mmc/card.h> 46 #include <linux/mmc/host.h> 47 #include <linux/mmc/mmc.h> 48 #include <linux/mmc/sd.h> 49 50 #include <linux/uaccess.h> 51 52 #include "queue.h" 53 #include "block.h" 54 #include "core.h" 55 #include "card.h" 56 #include "crypto.h" 57 #include "host.h" 58 #include "bus.h" 59 #include "mmc_ops.h" 60 #include "quirks.h" 61 #include "sd_ops.h" 62 63 MODULE_ALIAS("mmc:block"); 64 #ifdef MODULE_PARAM_PREFIX 65 #undef MODULE_PARAM_PREFIX 66 #endif 67 #define MODULE_PARAM_PREFIX "mmcblk." 68 69 /* 70 * Set a 10 second timeout for polling write request busy state. Note, mmc core 71 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 72 * second software timer to timeout the whole request, so 10 seconds should be 73 * ample. 74 */ 75 #define MMC_BLK_TIMEOUT_MS (10 * 1000) 76 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 77 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) 78 79 #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ 80 (rq_data_dir(req) == WRITE)) 81 static DEFINE_MUTEX(block_mutex); 82 83 /* 84 * The defaults come from config options but can be overriden by module 85 * or bootarg options. 86 */ 87 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; 88 89 /* 90 * We've only got one major, so number of mmcblk devices is 91 * limited to (1 << 20) / number of minors per device. It is also 92 * limited by the MAX_DEVICES below. 93 */ 94 static int max_devices; 95 96 #define MAX_DEVICES 256 97 98 static DEFINE_IDA(mmc_blk_ida); 99 static DEFINE_IDA(mmc_rpmb_ida); 100 101 struct mmc_blk_busy_data { 102 struct mmc_card *card; 103 u32 status; 104 }; 105 106 /* 107 * There is one mmc_blk_data per slot. 
108 */ 109 struct mmc_blk_data { 110 struct device *parent; 111 struct gendisk *disk; 112 struct mmc_queue queue; 113 struct list_head part; 114 struct list_head rpmbs; 115 116 unsigned int flags; 117 #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ 118 #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ 119 120 struct kref kref; 121 unsigned int read_only; 122 unsigned int part_type; 123 unsigned int reset_done; 124 #define MMC_BLK_READ BIT(0) 125 #define MMC_BLK_WRITE BIT(1) 126 #define MMC_BLK_DISCARD BIT(2) 127 #define MMC_BLK_SECDISCARD BIT(3) 128 #define MMC_BLK_CQE_RECOVERY BIT(4) 129 #define MMC_BLK_TRIM BIT(5) 130 131 /* 132 * Only set in main mmc_blk_data associated 133 * with mmc_card with dev_set_drvdata, and keeps 134 * track of the current selected device partition. 135 */ 136 unsigned int part_curr; 137 #define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */ 138 int area_type; 139 140 /* debugfs files (only in main mmc_blk_data) */ 141 struct dentry *status_dentry; 142 struct dentry *ext_csd_dentry; 143 }; 144 145 /* Device type for RPMB character devices */ 146 static dev_t mmc_rpmb_devt; 147 148 /* Bus type for RPMB character devices */ 149 static struct bus_type mmc_rpmb_bus_type = { 150 .name = "mmc_rpmb", 151 }; 152 153 /** 154 * struct mmc_rpmb_data - special RPMB device type for these areas 155 * @dev: the device for the RPMB area 156 * @chrdev: character device for the RPMB area 157 * @id: unique device ID number 158 * @part_index: partition index (0 on first) 159 * @md: parent MMC block device 160 * @node: list item, so we can put this device on a list 161 */ 162 struct mmc_rpmb_data { 163 struct device dev; 164 struct cdev chrdev; 165 int id; 166 unsigned int part_index; 167 struct mmc_blk_data *md; 168 struct list_head node; 169 }; 170 171 static DEFINE_MUTEX(open_lock); 172 173 module_param(perdev_minors, int, 0444); 174 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 175 176 static inline int mmc_blk_part_switch(struct mmc_card *card, 177 unsigned int part_type); 178 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 179 struct mmc_card *card, 180 int recovery_mode, 181 struct mmc_queue *mq); 182 static void mmc_blk_hsq_req_done(struct mmc_request *mrq); 183 184 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 185 { 186 struct mmc_blk_data *md; 187 188 mutex_lock(&open_lock); 189 md = disk->private_data; 190 if (md && !kref_get_unless_zero(&md->kref)) 191 md = NULL; 192 mutex_unlock(&open_lock); 193 194 return md; 195 } 196 197 static inline int mmc_get_devidx(struct gendisk *disk) 198 { 199 int devidx = disk->first_minor / perdev_minors; 200 return devidx; 201 } 202 203 static void mmc_blk_kref_release(struct kref *ref) 204 { 205 struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref); 206 int devidx; 207 208 devidx = mmc_get_devidx(md->disk); 209 ida_simple_remove(&mmc_blk_ida, devidx); 210 211 mutex_lock(&open_lock); 212 md->disk->private_data = NULL; 213 mutex_unlock(&open_lock); 214 215 put_disk(md->disk); 216 kfree(md); 217 } 218 219 static void mmc_blk_put(struct mmc_blk_data *md) 220 { 221 kref_put(&md->kref, mmc_blk_kref_release); 222 } 223 224 static ssize_t power_ro_lock_show(struct device *dev, 225 struct device_attribute *attr, char *buf) 226 { 227 int ret; 228 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 229 struct mmc_card *card = md->queue.card; 230 int locked = 0; 231 232 if (card->ext_csd.boot_ro_lock & 
EXT_CSD_BOOT_WP_B_PERM_WP_EN) 233 locked = 2; 234 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) 235 locked = 1; 236 237 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 238 239 mmc_blk_put(md); 240 241 return ret; 242 } 243 244 static ssize_t power_ro_lock_store(struct device *dev, 245 struct device_attribute *attr, const char *buf, size_t count) 246 { 247 int ret; 248 struct mmc_blk_data *md, *part_md; 249 struct mmc_queue *mq; 250 struct request *req; 251 unsigned long set; 252 253 if (kstrtoul(buf, 0, &set)) 254 return -EINVAL; 255 256 if (set != 1) 257 return count; 258 259 md = mmc_blk_get(dev_to_disk(dev)); 260 mq = &md->queue; 261 262 /* Dispatch locking to the block layer */ 263 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0); 264 if (IS_ERR(req)) { 265 count = PTR_ERR(req); 266 goto out_put; 267 } 268 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; 269 blk_execute_rq(req, false); 270 ret = req_to_mmc_queue_req(req)->drv_op_result; 271 blk_mq_free_request(req); 272 273 if (!ret) { 274 pr_info("%s: Locking boot partition ro until next power on\n", 275 md->disk->disk_name); 276 set_disk_ro(md->disk, 1); 277 278 list_for_each_entry(part_md, &md->part, part) 279 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { 280 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); 281 set_disk_ro(part_md->disk, 1); 282 } 283 } 284 out_put: 285 mmc_blk_put(md); 286 return count; 287 } 288 289 static DEVICE_ATTR(ro_lock_until_next_power_on, 0, 290 power_ro_lock_show, power_ro_lock_store); 291 292 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, 293 char *buf) 294 { 295 int ret; 296 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 297 298 ret = snprintf(buf, PAGE_SIZE, "%d\n", 299 get_disk_ro(dev_to_disk(dev)) ^ 300 md->read_only); 301 mmc_blk_put(md); 302 return ret; 303 } 304 305 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, 306 const char *buf, size_t count) 307 { 308 int ret; 309 char *end; 310 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 311 unsigned long set = simple_strtoul(buf, &end, 0); 312 if (end == buf) { 313 ret = -EINVAL; 314 goto out; 315 } 316 317 set_disk_ro(dev_to_disk(dev), set || md->read_only); 318 ret = count; 319 out: 320 mmc_blk_put(md); 321 return ret; 322 } 323 324 static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store); 325 326 static struct attribute *mmc_disk_attrs[] = { 327 &dev_attr_force_ro.attr, 328 &dev_attr_ro_lock_until_next_power_on.attr, 329 NULL, 330 }; 331 332 static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj, 333 struct attribute *a, int n) 334 { 335 struct device *dev = kobj_to_dev(kobj); 336 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 337 umode_t mode = a->mode; 338 339 if (a == &dev_attr_ro_lock_until_next_power_on.attr && 340 (md->area_type & MMC_BLK_DATA_AREA_BOOT) && 341 md->queue.card->ext_csd.boot_ro_lockable) { 342 mode = S_IRUGO; 343 if (!(md->queue.card->ext_csd.boot_ro_lock & 344 EXT_CSD_BOOT_WP_B_PWR_WP_DIS)) 345 mode |= S_IWUSR; 346 } 347 348 mmc_blk_put(md); 349 return mode; 350 } 351 352 static const struct attribute_group mmc_disk_attr_group = { 353 .is_visible = mmc_disk_attrs_is_visible, 354 .attrs = mmc_disk_attrs, 355 }; 356 357 static const struct attribute_group *mmc_disk_attr_groups[] = { 358 &mmc_disk_attr_group, 359 NULL, 360 }; 361 362 static int mmc_blk_open(struct block_device *bdev, fmode_t mode) 363 { 364 struct mmc_blk_data *md = 
mmc_blk_get(bdev->bd_disk); 365 int ret = -ENXIO; 366 367 mutex_lock(&block_mutex); 368 if (md) { 369 ret = 0; 370 if ((mode & FMODE_WRITE) && md->read_only) { 371 mmc_blk_put(md); 372 ret = -EROFS; 373 } 374 } 375 mutex_unlock(&block_mutex); 376 377 return ret; 378 } 379 380 static void mmc_blk_release(struct gendisk *disk, fmode_t mode) 381 { 382 struct mmc_blk_data *md = disk->private_data; 383 384 mutex_lock(&block_mutex); 385 mmc_blk_put(md); 386 mutex_unlock(&block_mutex); 387 } 388 389 static int 390 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 391 { 392 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); 393 geo->heads = 4; 394 geo->sectors = 16; 395 return 0; 396 } 397 398 struct mmc_blk_ioc_data { 399 struct mmc_ioc_cmd ic; 400 unsigned char *buf; 401 u64 buf_bytes; 402 struct mmc_rpmb_data *rpmb; 403 }; 404 405 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( 406 struct mmc_ioc_cmd __user *user) 407 { 408 struct mmc_blk_ioc_data *idata; 409 int err; 410 411 idata = kmalloc(sizeof(*idata), GFP_KERNEL); 412 if (!idata) { 413 err = -ENOMEM; 414 goto out; 415 } 416 417 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { 418 err = -EFAULT; 419 goto idata_err; 420 } 421 422 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; 423 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { 424 err = -EOVERFLOW; 425 goto idata_err; 426 } 427 428 if (!idata->buf_bytes) { 429 idata->buf = NULL; 430 return idata; 431 } 432 433 idata->buf = memdup_user((void __user *)(unsigned long) 434 idata->ic.data_ptr, idata->buf_bytes); 435 if (IS_ERR(idata->buf)) { 436 err = PTR_ERR(idata->buf); 437 goto idata_err; 438 } 439 440 return idata; 441 442 idata_err: 443 kfree(idata); 444 out: 445 return ERR_PTR(err); 446 } 447 448 static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, 449 struct mmc_blk_ioc_data *idata) 450 { 451 struct mmc_ioc_cmd *ic = &idata->ic; 452 453 if (copy_to_user(&(ic_ptr->response), ic->response, 454 sizeof(ic->response))) 455 return -EFAULT; 456 457 if (!idata->ic.write_flag) { 458 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, 459 idata->buf, idata->buf_bytes)) 460 return -EFAULT; 461 } 462 463 return 0; 464 } 465 466 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, 467 struct mmc_blk_ioc_data *idata) 468 { 469 struct mmc_command cmd = {}, sbc = {}; 470 struct mmc_data data = {}; 471 struct mmc_request mrq = {}; 472 struct scatterlist sg; 473 int err; 474 unsigned int target_part; 475 476 if (!card || !md || !idata) 477 return -EINVAL; 478 479 /* 480 * The RPMB accesses comes in from the character device, so we 481 * need to target these explicitly. Else we just target the 482 * partition type for the block device the ioctl() was issued 483 * on. 484 */ 485 if (idata->rpmb) { 486 /* Support multiple RPMB partitions */ 487 target_part = idata->rpmb->part_index; 488 target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; 489 } else { 490 target_part = md->part_type; 491 } 492 493 cmd.opcode = idata->ic.opcode; 494 cmd.arg = idata->ic.arg; 495 cmd.flags = idata->ic.flags; 496 497 if (idata->buf_bytes) { 498 data.sg = &sg; 499 data.sg_len = 1; 500 data.blksz = idata->ic.blksz; 501 data.blocks = idata->ic.blocks; 502 503 sg_init_one(data.sg, idata->buf, idata->buf_bytes); 504 505 if (idata->ic.write_flag) 506 data.flags = MMC_DATA_WRITE; 507 else 508 data.flags = MMC_DATA_READ; 509 510 /* data.flags must already be set before doing this. 
*/ 511 mmc_set_data_timeout(&data, card); 512 513 /* Allow overriding the timeout_ns for empirical tuning. */ 514 if (idata->ic.data_timeout_ns) 515 data.timeout_ns = idata->ic.data_timeout_ns; 516 517 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 518 /* 519 * Pretend this is a data transfer and rely on the 520 * host driver to compute timeout. When all host 521 * drivers support cmd.cmd_timeout for R1B, this 522 * can be changed to: 523 * 524 * mrq.data = NULL; 525 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; 526 */ 527 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; 528 } 529 530 mrq.data = &data; 531 } 532 533 mrq.cmd = &cmd; 534 535 err = mmc_blk_part_switch(card, target_part); 536 if (err) 537 return err; 538 539 if (idata->ic.is_acmd) { 540 err = mmc_app_cmd(card->host, card); 541 if (err) 542 return err; 543 } 544 545 if (idata->rpmb) { 546 sbc.opcode = MMC_SET_BLOCK_COUNT; 547 /* 548 * We don't do any blockcount validation because the max size 549 * may be increased by a future standard. We just copy the 550 * 'Reliable Write' bit here. 551 */ 552 sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); 553 sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 554 mrq.sbc = &sbc; 555 } 556 557 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && 558 (cmd.opcode == MMC_SWITCH)) 559 return mmc_sanitize(card, idata->ic.cmd_timeout_ms); 560 561 mmc_wait_for_req(card->host, &mrq); 562 memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); 563 564 if (cmd.error) { 565 dev_err(mmc_dev(card->host), "%s: cmd error %d\n", 566 __func__, cmd.error); 567 return cmd.error; 568 } 569 if (data.error) { 570 dev_err(mmc_dev(card->host), "%s: data error %d\n", 571 __func__, data.error); 572 return data.error; 573 } 574 575 /* 576 * Make sure the cache of the PARTITION_CONFIG register and 577 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write 578 * changed it successfully. 579 */ 580 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && 581 (cmd.opcode == MMC_SWITCH)) { 582 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); 583 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); 584 585 /* 586 * Update cache so the next mmc_blk_part_switch call operates 587 * on up-to-date data. 588 */ 589 card->ext_csd.part_config = value; 590 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; 591 } 592 593 /* 594 * Make sure to update CACHE_CTRL in case it was changed. The cache 595 * will get turned back on if the card is re-initialized, e.g. 596 * suspend/resume or hw reset in recovery. 597 */ 598 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) && 599 (cmd.opcode == MMC_SWITCH)) { 600 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1; 601 602 card->ext_csd.cache_ctrl = value; 603 } 604 605 /* 606 * According to the SD specs, some commands require a delay after 607 * issuing the command. 608 */ 609 if (idata->ic.postsleep_min_us) 610 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); 611 612 if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { 613 /* 614 * Ensure RPMB/R1B command has completed by polling CMD13 "Send Status". Here we 615 * allow to override the default timeout value if a custom timeout is specified. 616 */ 617 err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? 
: MMC_BLK_TIMEOUT_MS, 618 false, MMC_BUSY_IO); 619 } 620 621 return err; 622 } 623 624 static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, 625 struct mmc_ioc_cmd __user *ic_ptr, 626 struct mmc_rpmb_data *rpmb) 627 { 628 struct mmc_blk_ioc_data *idata; 629 struct mmc_blk_ioc_data *idatas[1]; 630 struct mmc_queue *mq; 631 struct mmc_card *card; 632 int err = 0, ioc_err = 0; 633 struct request *req; 634 635 idata = mmc_blk_ioctl_copy_from_user(ic_ptr); 636 if (IS_ERR(idata)) 637 return PTR_ERR(idata); 638 /* This will be NULL on non-RPMB ioctl():s */ 639 idata->rpmb = rpmb; 640 641 card = md->queue.card; 642 if (IS_ERR(card)) { 643 err = PTR_ERR(card); 644 goto cmd_done; 645 } 646 647 /* 648 * Dispatch the ioctl() into the block request queue. 649 */ 650 mq = &md->queue; 651 req = blk_mq_alloc_request(mq->queue, 652 idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 653 if (IS_ERR(req)) { 654 err = PTR_ERR(req); 655 goto cmd_done; 656 } 657 idatas[0] = idata; 658 req_to_mmc_queue_req(req)->drv_op = 659 rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 660 req_to_mmc_queue_req(req)->drv_op_data = idatas; 661 req_to_mmc_queue_req(req)->ioc_count = 1; 662 blk_execute_rq(req, false); 663 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 664 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); 665 blk_mq_free_request(req); 666 667 cmd_done: 668 kfree(idata->buf); 669 kfree(idata); 670 return ioc_err ? ioc_err : err; 671 } 672 673 static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, 674 struct mmc_ioc_multi_cmd __user *user, 675 struct mmc_rpmb_data *rpmb) 676 { 677 struct mmc_blk_ioc_data **idata = NULL; 678 struct mmc_ioc_cmd __user *cmds = user->cmds; 679 struct mmc_card *card; 680 struct mmc_queue *mq; 681 int err = 0, ioc_err = 0; 682 __u64 num_of_cmds; 683 unsigned int i, n; 684 struct request *req; 685 686 if (copy_from_user(&num_of_cmds, &user->num_of_cmds, 687 sizeof(num_of_cmds))) 688 return -EFAULT; 689 690 if (!num_of_cmds) 691 return 0; 692 693 if (num_of_cmds > MMC_IOC_MAX_CMDS) 694 return -EINVAL; 695 696 n = num_of_cmds; 697 idata = kcalloc(n, sizeof(*idata), GFP_KERNEL); 698 if (!idata) 699 return -ENOMEM; 700 701 for (i = 0; i < n; i++) { 702 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); 703 if (IS_ERR(idata[i])) { 704 err = PTR_ERR(idata[i]); 705 n = i; 706 goto cmd_err; 707 } 708 /* This will be NULL on non-RPMB ioctl():s */ 709 idata[i]->rpmb = rpmb; 710 } 711 712 card = md->queue.card; 713 if (IS_ERR(card)) { 714 err = PTR_ERR(card); 715 goto cmd_err; 716 } 717 718 719 /* 720 * Dispatch the ioctl()s into the block request queue. 721 */ 722 mq = &md->queue; 723 req = blk_mq_alloc_request(mq->queue, 724 idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 725 if (IS_ERR(req)) { 726 err = PTR_ERR(req); 727 goto cmd_err; 728 } 729 req_to_mmc_queue_req(req)->drv_op = 730 rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 731 req_to_mmc_queue_req(req)->drv_op_data = idata; 732 req_to_mmc_queue_req(req)->ioc_count = n; 733 blk_execute_rq(req, false); 734 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 735 736 /* copy to user if data and response */ 737 for (i = 0; i < n && !err; i++) 738 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); 739 740 blk_mq_free_request(req); 741 742 cmd_err: 743 for (i = 0; i < n; i++) { 744 kfree(idata[i]->buf); 745 kfree(idata[i]); 746 } 747 kfree(idata); 748 return ioc_err ? 
ioc_err : err; 749 } 750 751 static int mmc_blk_check_blkdev(struct block_device *bdev) 752 { 753 /* 754 * The caller must have CAP_SYS_RAWIO, and must be calling this on the 755 * whole block device, not on a partition. This prevents overspray 756 * between sibling partitions. 757 */ 758 if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev)) 759 return -EPERM; 760 return 0; 761 } 762 763 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, 764 unsigned int cmd, unsigned long arg) 765 { 766 struct mmc_blk_data *md; 767 int ret; 768 769 switch (cmd) { 770 case MMC_IOC_CMD: 771 ret = mmc_blk_check_blkdev(bdev); 772 if (ret) 773 return ret; 774 md = mmc_blk_get(bdev->bd_disk); 775 if (!md) 776 return -EINVAL; 777 ret = mmc_blk_ioctl_cmd(md, 778 (struct mmc_ioc_cmd __user *)arg, 779 NULL); 780 mmc_blk_put(md); 781 return ret; 782 case MMC_IOC_MULTI_CMD: 783 ret = mmc_blk_check_blkdev(bdev); 784 if (ret) 785 return ret; 786 md = mmc_blk_get(bdev->bd_disk); 787 if (!md) 788 return -EINVAL; 789 ret = mmc_blk_ioctl_multi_cmd(md, 790 (struct mmc_ioc_multi_cmd __user *)arg, 791 NULL); 792 mmc_blk_put(md); 793 return ret; 794 default: 795 return -EINVAL; 796 } 797 } 798 799 #ifdef CONFIG_COMPAT 800 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, 801 unsigned int cmd, unsigned long arg) 802 { 803 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); 804 } 805 #endif 806 807 static int mmc_blk_alternative_gpt_sector(struct gendisk *disk, 808 sector_t *sector) 809 { 810 struct mmc_blk_data *md; 811 int ret; 812 813 md = mmc_blk_get(disk); 814 if (!md) 815 return -EINVAL; 816 817 if (md->queue.card) 818 ret = mmc_card_alternative_gpt_sector(md->queue.card, sector); 819 else 820 ret = -ENODEV; 821 822 mmc_blk_put(md); 823 824 return ret; 825 } 826 827 static const struct block_device_operations mmc_bdops = { 828 .open = mmc_blk_open, 829 .release = mmc_blk_release, 830 .getgeo = mmc_blk_getgeo, 831 .owner = THIS_MODULE, 832 .ioctl = mmc_blk_ioctl, 833 #ifdef CONFIG_COMPAT 834 .compat_ioctl = mmc_blk_compat_ioctl, 835 #endif 836 .alternative_gpt_sector = mmc_blk_alternative_gpt_sector, 837 }; 838 839 static int mmc_blk_part_switch_pre(struct mmc_card *card, 840 unsigned int part_type) 841 { 842 int ret = 0; 843 844 if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { 845 if (card->ext_csd.cmdq_en) { 846 ret = mmc_cmdq_disable(card); 847 if (ret) 848 return ret; 849 } 850 mmc_retune_pause(card->host); 851 } 852 853 return ret; 854 } 855 856 static int mmc_blk_part_switch_post(struct mmc_card *card, 857 unsigned int part_type) 858 { 859 int ret = 0; 860 861 if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { 862 mmc_retune_unpause(card->host); 863 if (card->reenable_cmdq && !card->ext_csd.cmdq_en) 864 ret = mmc_cmdq_enable(card); 865 } 866 867 return ret; 868 } 869 870 static inline int mmc_blk_part_switch(struct mmc_card *card, 871 unsigned int part_type) 872 { 873 int ret = 0; 874 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); 875 876 if (main_md->part_curr == part_type) 877 return 0; 878 879 if (mmc_card_mmc(card)) { 880 u8 part_config = card->ext_csd.part_config; 881 882 ret = mmc_blk_part_switch_pre(card, part_type); 883 if (ret) 884 return ret; 885 886 part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; 887 part_config |= part_type; 888 889 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 890 EXT_CSD_PART_CONFIG, part_config, 891 card->ext_csd.part_time); 892 if (ret) { 893 mmc_blk_part_switch_post(card, part_type); 894 return ret; 895 } 896 897 
card->ext_csd.part_config = part_config; 898 899 ret = mmc_blk_part_switch_post(card, main_md->part_curr); 900 } 901 902 main_md->part_curr = part_type; 903 return ret; 904 } 905 906 static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) 907 { 908 int err; 909 u32 result; 910 __be32 *blocks; 911 912 struct mmc_request mrq = {}; 913 struct mmc_command cmd = {}; 914 struct mmc_data data = {}; 915 916 struct scatterlist sg; 917 918 cmd.opcode = MMC_APP_CMD; 919 cmd.arg = card->rca << 16; 920 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 921 922 err = mmc_wait_for_cmd(card->host, &cmd, 0); 923 if (err) 924 return err; 925 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) 926 return -EIO; 927 928 memset(&cmd, 0, sizeof(struct mmc_command)); 929 930 cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; 931 cmd.arg = 0; 932 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 933 934 data.blksz = 4; 935 data.blocks = 1; 936 data.flags = MMC_DATA_READ; 937 data.sg = &sg; 938 data.sg_len = 1; 939 mmc_set_data_timeout(&data, card); 940 941 mrq.cmd = &cmd; 942 mrq.data = &data; 943 944 blocks = kmalloc(4, GFP_KERNEL); 945 if (!blocks) 946 return -ENOMEM; 947 948 sg_init_one(&sg, blocks, 4); 949 950 mmc_wait_for_req(card->host, &mrq); 951 952 result = ntohl(*blocks); 953 kfree(blocks); 954 955 if (cmd.error || data.error) 956 return -EIO; 957 958 *written_blocks = result; 959 960 return 0; 961 } 962 963 static unsigned int mmc_blk_clock_khz(struct mmc_host *host) 964 { 965 if (host->actual_clock) 966 return host->actual_clock / 1000; 967 968 /* Clock may be subject to a divisor, fudge it by a factor of 2. */ 969 if (host->ios.clock) 970 return host->ios.clock / 2000; 971 972 /* How can there be no clock */ 973 WARN_ON_ONCE(1); 974 return 100; /* 100 kHz is minimum possible value */ 975 } 976 977 static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, 978 struct mmc_data *data) 979 { 980 unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); 981 unsigned int khz; 982 983 if (data->timeout_clks) { 984 khz = mmc_blk_clock_khz(host); 985 ms += DIV_ROUND_UP(data->timeout_clks, khz); 986 } 987 988 return ms; 989 } 990 991 /* 992 * Attempts to reset the card and get back to the requested partition. 993 * Therefore any error here must result in cancelling the block layer 994 * request, it must not be reattempted without going through the mmc_blk 995 * partition sanity checks. 996 */ 997 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, 998 int type) 999 { 1000 int err; 1001 struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); 1002 1003 if (md->reset_done & type) 1004 return -EEXIST; 1005 1006 md->reset_done |= type; 1007 err = mmc_hw_reset(host->card); 1008 /* 1009 * A successful reset will leave the card in the main partition, but 1010 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID 1011 * in that case. 1012 */ 1013 main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type; 1014 if (err) 1015 return err; 1016 /* Ensure we switch back to the correct partition */ 1017 if (mmc_blk_part_switch(host->card, md->part_type)) 1018 /* 1019 * We have failed to get back into the correct 1020 * partition, so we need to abort the whole request. 
1021 */ 1022 return -ENODEV; 1023 return 0; 1024 } 1025 1026 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) 1027 { 1028 md->reset_done &= ~type; 1029 } 1030 1031 /* 1032 * The non-block commands come back from the block layer after it queued it and 1033 * processed it with all other requests and then they get issued in this 1034 * function. 1035 */ 1036 static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) 1037 { 1038 struct mmc_queue_req *mq_rq; 1039 struct mmc_card *card = mq->card; 1040 struct mmc_blk_data *md = mq->blkdata; 1041 struct mmc_blk_ioc_data **idata; 1042 bool rpmb_ioctl; 1043 u8 **ext_csd; 1044 u32 status; 1045 int ret; 1046 int i; 1047 1048 mq_rq = req_to_mmc_queue_req(req); 1049 rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); 1050 1051 switch (mq_rq->drv_op) { 1052 case MMC_DRV_OP_IOCTL: 1053 if (card->ext_csd.cmdq_en) { 1054 ret = mmc_cmdq_disable(card); 1055 if (ret) 1056 break; 1057 } 1058 fallthrough; 1059 case MMC_DRV_OP_IOCTL_RPMB: 1060 idata = mq_rq->drv_op_data; 1061 for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { 1062 ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); 1063 if (ret) 1064 break; 1065 } 1066 /* Always switch back to main area after RPMB access */ 1067 if (rpmb_ioctl) 1068 mmc_blk_part_switch(card, 0); 1069 else if (card->reenable_cmdq && !card->ext_csd.cmdq_en) 1070 mmc_cmdq_enable(card); 1071 break; 1072 case MMC_DRV_OP_BOOT_WP: 1073 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1074 card->ext_csd.boot_ro_lock | 1075 EXT_CSD_BOOT_WP_B_PWR_WP_EN, 1076 card->ext_csd.part_time); 1077 if (ret) 1078 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", 1079 md->disk->disk_name, ret); 1080 else 1081 card->ext_csd.boot_ro_lock |= 1082 EXT_CSD_BOOT_WP_B_PWR_WP_EN; 1083 break; 1084 case MMC_DRV_OP_GET_CARD_STATUS: 1085 ret = mmc_send_status(card, &status); 1086 if (!ret) 1087 ret = status; 1088 break; 1089 case MMC_DRV_OP_GET_EXT_CSD: 1090 ext_csd = mq_rq->drv_op_data; 1091 ret = mmc_get_ext_csd(card, ext_csd); 1092 break; 1093 default: 1094 pr_err("%s: unknown driver specific operation\n", 1095 md->disk->disk_name); 1096 ret = -EINVAL; 1097 break; 1098 } 1099 mq_rq->drv_op_result = ret; 1100 blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); 1101 } 1102 1103 static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req, 1104 int type, unsigned int erase_arg) 1105 { 1106 struct mmc_blk_data *md = mq->blkdata; 1107 struct mmc_card *card = md->queue.card; 1108 unsigned int from, nr; 1109 int err = 0; 1110 blk_status_t status = BLK_STS_OK; 1111 1112 if (!mmc_can_erase(card)) { 1113 status = BLK_STS_NOTSUPP; 1114 goto fail; 1115 } 1116 1117 from = blk_rq_pos(req); 1118 nr = blk_rq_sectors(req); 1119 1120 do { 1121 err = 0; 1122 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1123 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1124 INAND_CMD38_ARG_EXT_CSD, 1125 erase_arg == MMC_TRIM_ARG ? 
1126 INAND_CMD38_ARG_TRIM : 1127 INAND_CMD38_ARG_ERASE, 1128 card->ext_csd.generic_cmd6_time); 1129 } 1130 if (!err) 1131 err = mmc_erase(card, from, nr, erase_arg); 1132 } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); 1133 if (err) 1134 status = BLK_STS_IOERR; 1135 else 1136 mmc_blk_reset_success(md, type); 1137 fail: 1138 blk_mq_end_request(req, status); 1139 } 1140 1141 static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req) 1142 { 1143 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG); 1144 } 1145 1146 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 1147 { 1148 struct mmc_blk_data *md = mq->blkdata; 1149 struct mmc_card *card = md->queue.card; 1150 unsigned int arg = card->erase_arg; 1151 1152 if (mmc_card_broken_sd_discard(card)) 1153 arg = SD_ERASE_ARG; 1154 1155 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg); 1156 } 1157 1158 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, 1159 struct request *req) 1160 { 1161 struct mmc_blk_data *md = mq->blkdata; 1162 struct mmc_card *card = md->queue.card; 1163 unsigned int from, nr, arg; 1164 int err = 0, type = MMC_BLK_SECDISCARD; 1165 blk_status_t status = BLK_STS_OK; 1166 1167 if (!(mmc_can_secure_erase_trim(card))) { 1168 status = BLK_STS_NOTSUPP; 1169 goto out; 1170 } 1171 1172 from = blk_rq_pos(req); 1173 nr = blk_rq_sectors(req); 1174 1175 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) 1176 arg = MMC_SECURE_TRIM1_ARG; 1177 else 1178 arg = MMC_SECURE_ERASE_ARG; 1179 1180 retry: 1181 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1182 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1183 INAND_CMD38_ARG_EXT_CSD, 1184 arg == MMC_SECURE_TRIM1_ARG ? 1185 INAND_CMD38_ARG_SECTRIM1 : 1186 INAND_CMD38_ARG_SECERASE, 1187 card->ext_csd.generic_cmd6_time); 1188 if (err) 1189 goto out_retry; 1190 } 1191 1192 err = mmc_erase(card, from, nr, arg); 1193 if (err == -EIO) 1194 goto out_retry; 1195 if (err) { 1196 status = BLK_STS_IOERR; 1197 goto out; 1198 } 1199 1200 if (arg == MMC_SECURE_TRIM1_ARG) { 1201 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1202 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1203 INAND_CMD38_ARG_EXT_CSD, 1204 INAND_CMD38_ARG_SECTRIM2, 1205 card->ext_csd.generic_cmd6_time); 1206 if (err) 1207 goto out_retry; 1208 } 1209 1210 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 1211 if (err == -EIO) 1212 goto out_retry; 1213 if (err) { 1214 status = BLK_STS_IOERR; 1215 goto out; 1216 } 1217 } 1218 1219 out_retry: 1220 if (err && !mmc_blk_reset(md, card->host, type)) 1221 goto retry; 1222 if (!err) 1223 mmc_blk_reset_success(md, type); 1224 out: 1225 blk_mq_end_request(req, status); 1226 } 1227 1228 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) 1229 { 1230 struct mmc_blk_data *md = mq->blkdata; 1231 struct mmc_card *card = md->queue.card; 1232 int ret = 0; 1233 1234 ret = mmc_flush_cache(card->host); 1235 blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); 1236 } 1237 1238 /* 1239 * Reformat current write as a reliable write, supporting 1240 * both legacy and the enhanced reliable write MMC cards. 1241 * In each transfer we'll handle only as much as a single 1242 * reliable write can handle, thus finish the request in 1243 * partial completions. 
1244 */ 1245 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, 1246 struct mmc_card *card, 1247 struct request *req) 1248 { 1249 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { 1250 /* Legacy mode imposes restrictions on transfers. */ 1251 if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors)) 1252 brq->data.blocks = 1; 1253 1254 if (brq->data.blocks > card->ext_csd.rel_sectors) 1255 brq->data.blocks = card->ext_csd.rel_sectors; 1256 else if (brq->data.blocks < card->ext_csd.rel_sectors) 1257 brq->data.blocks = 1; 1258 } 1259 } 1260 1261 #define CMD_ERRORS_EXCL_OOR \ 1262 (R1_ADDRESS_ERROR | /* Misaligned address */ \ 1263 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ 1264 R1_WP_VIOLATION | /* Tried to write to protected block */ \ 1265 R1_CARD_ECC_FAILED | /* Card ECC failed */ \ 1266 R1_CC_ERROR | /* Card controller error */ \ 1267 R1_ERROR) /* General/unknown error */ 1268 1269 #define CMD_ERRORS \ 1270 (CMD_ERRORS_EXCL_OOR | \ 1271 R1_OUT_OF_RANGE) /* Command argument out of range */ \ 1272 1273 static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) 1274 { 1275 u32 val; 1276 1277 /* 1278 * Per the SD specification(physical layer version 4.10)[1], 1279 * section 4.3.3, it explicitly states that "When the last 1280 * block of user area is read using CMD18, the host should 1281 * ignore OUT_OF_RANGE error that may occur even the sequence 1282 * is correct". And JESD84-B51 for eMMC also has a similar 1283 * statement on section 6.8.3. 1284 * 1285 * Multiple block read/write could be done by either predefined 1286 * method, namely CMD23, or open-ending mode. For open-ending mode, 1287 * we should ignore the OUT_OF_RANGE error as it's normal behaviour. 1288 * 1289 * However the spec[1] doesn't tell us whether we should also 1290 * ignore that for predefined method. But per the spec[1], section 1291 * 4.15 Set Block Count Command, it says"If illegal block count 1292 * is set, out of range error will be indicated during read/write 1293 * operation (For example, data transfer is stopped at user area 1294 * boundary)." In another word, we could expect a out of range error 1295 * in the response for the following CMD18/25. And if argument of 1296 * CMD23 + the argument of CMD18/25 exceed the max number of blocks, 1297 * we could also expect to get a -ETIMEDOUT or any error number from 1298 * the host drivers due to missing data response(for write)/data(for 1299 * read), as the cards will stop the data transfer by itself per the 1300 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. 1301 */ 1302 1303 if (!brq->stop.error) { 1304 bool oor_with_open_end; 1305 /* If there is no error yet, check R1 response */ 1306 1307 val = brq->stop.resp[0] & CMD_ERRORS; 1308 oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; 1309 1310 if (val && !oor_with_open_end) 1311 brq->stop.error = -EIO; 1312 } 1313 } 1314 1315 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, 1316 int recovery_mode, bool *do_rel_wr_p, 1317 bool *do_data_tag_p) 1318 { 1319 struct mmc_blk_data *md = mq->blkdata; 1320 struct mmc_card *card = md->queue.card; 1321 struct mmc_blk_request *brq = &mqrq->brq; 1322 struct request *req = mmc_queue_req_to_req(mqrq); 1323 bool do_rel_wr, do_data_tag; 1324 1325 /* 1326 * Reliable writes are used to implement Forced Unit Access and 1327 * are supported only on MMCs. 
1328 */ 1329 do_rel_wr = (req->cmd_flags & REQ_FUA) && 1330 rq_data_dir(req) == WRITE && 1331 (md->flags & MMC_BLK_REL_WR); 1332 1333 memset(brq, 0, sizeof(struct mmc_blk_request)); 1334 1335 mmc_crypto_prepare_req(mqrq); 1336 1337 brq->mrq.data = &brq->data; 1338 brq->mrq.tag = req->tag; 1339 1340 brq->stop.opcode = MMC_STOP_TRANSMISSION; 1341 brq->stop.arg = 0; 1342 1343 if (rq_data_dir(req) == READ) { 1344 brq->data.flags = MMC_DATA_READ; 1345 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1346 } else { 1347 brq->data.flags = MMC_DATA_WRITE; 1348 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1349 } 1350 1351 brq->data.blksz = 512; 1352 brq->data.blocks = blk_rq_sectors(req); 1353 brq->data.blk_addr = blk_rq_pos(req); 1354 1355 /* 1356 * The command queue supports 2 priorities: "high" (1) and "simple" (0). 1357 * The eMMC will give "high" priority tasks priority over "simple" 1358 * priority tasks. Here we always set "simple" priority by not setting 1359 * MMC_DATA_PRIO. 1360 */ 1361 1362 /* 1363 * The block layer doesn't support all sector count 1364 * restrictions, so we need to be prepared for too big 1365 * requests. 1366 */ 1367 if (brq->data.blocks > card->host->max_blk_count) 1368 brq->data.blocks = card->host->max_blk_count; 1369 1370 if (brq->data.blocks > 1) { 1371 /* 1372 * Some SD cards in SPI mode return a CRC error or even lock up 1373 * completely when trying to read the last block using a 1374 * multiblock read command. 1375 */ 1376 if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && 1377 (blk_rq_pos(req) + blk_rq_sectors(req) == 1378 get_capacity(md->disk))) 1379 brq->data.blocks--; 1380 1381 /* 1382 * After a read error, we redo the request one (native) sector 1383 * at a time in order to accurately determine which 1384 * sectors can be read successfully. 1385 */ 1386 if (recovery_mode) 1387 brq->data.blocks = queue_physical_block_size(mq->queue) >> 9; 1388 1389 /* 1390 * Some controllers have HW issues while operating 1391 * in multiple I/O mode 1392 */ 1393 if (card->host->ops->multi_io_quirk) 1394 brq->data.blocks = card->host->ops->multi_io_quirk(card, 1395 (rq_data_dir(req) == READ) ? 1396 MMC_DATA_READ : MMC_DATA_WRITE, 1397 brq->data.blocks); 1398 } 1399 1400 if (do_rel_wr) { 1401 mmc_apply_rel_rw(brq, card, req); 1402 brq->data.flags |= MMC_DATA_REL_WR; 1403 } 1404 1405 /* 1406 * Data tag is used only during writing meta data to speed 1407 * up write and any subsequent read of this meta data 1408 */ 1409 do_data_tag = card->ext_csd.data_tag_unit_size && 1410 (req->cmd_flags & REQ_META) && 1411 (rq_data_dir(req) == WRITE) && 1412 ((brq->data.blocks * brq->data.blksz) >= 1413 card->ext_csd.data_tag_unit_size); 1414 1415 if (do_data_tag) 1416 brq->data.flags |= MMC_DATA_DAT_TAG; 1417 1418 mmc_set_data_timeout(&brq->data, card); 1419 1420 brq->data.sg = mqrq->sg; 1421 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); 1422 1423 /* 1424 * Adjust the sg list so it is the same size as the 1425 * request. 
1426 */ 1427 if (brq->data.blocks != blk_rq_sectors(req)) { 1428 int i, data_size = brq->data.blocks << 9; 1429 struct scatterlist *sg; 1430 1431 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { 1432 data_size -= sg->length; 1433 if (data_size <= 0) { 1434 sg->length += data_size; 1435 i++; 1436 break; 1437 } 1438 } 1439 brq->data.sg_len = i; 1440 } 1441 1442 if (do_rel_wr_p) 1443 *do_rel_wr_p = do_rel_wr; 1444 1445 if (do_data_tag_p) 1446 *do_data_tag_p = do_data_tag; 1447 } 1448 1449 #define MMC_CQE_RETRIES 2 1450 1451 static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) 1452 { 1453 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1454 struct mmc_request *mrq = &mqrq->brq.mrq; 1455 struct request_queue *q = req->q; 1456 struct mmc_host *host = mq->card->host; 1457 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); 1458 unsigned long flags; 1459 bool put_card; 1460 int err; 1461 1462 mmc_cqe_post_req(host, mrq); 1463 1464 if (mrq->cmd && mrq->cmd->error) 1465 err = mrq->cmd->error; 1466 else if (mrq->data && mrq->data->error) 1467 err = mrq->data->error; 1468 else 1469 err = 0; 1470 1471 if (err) { 1472 if (mqrq->retries++ < MMC_CQE_RETRIES) 1473 blk_mq_requeue_request(req, true); 1474 else 1475 blk_mq_end_request(req, BLK_STS_IOERR); 1476 } else if (mrq->data) { 1477 if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) 1478 blk_mq_requeue_request(req, true); 1479 else 1480 __blk_mq_end_request(req, BLK_STS_OK); 1481 } else { 1482 blk_mq_end_request(req, BLK_STS_OK); 1483 } 1484 1485 spin_lock_irqsave(&mq->lock, flags); 1486 1487 mq->in_flight[issue_type] -= 1; 1488 1489 put_card = (mmc_tot_in_flight(mq) == 0); 1490 1491 mmc_cqe_check_busy(mq); 1492 1493 spin_unlock_irqrestore(&mq->lock, flags); 1494 1495 if (!mq->cqe_busy) 1496 blk_mq_run_hw_queues(q, true); 1497 1498 if (put_card) 1499 mmc_put_card(mq->card, &mq->ctx); 1500 } 1501 1502 void mmc_blk_cqe_recovery(struct mmc_queue *mq) 1503 { 1504 struct mmc_card *card = mq->card; 1505 struct mmc_host *host = card->host; 1506 int err; 1507 1508 pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); 1509 1510 err = mmc_cqe_recovery(host); 1511 if (err) 1512 mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); 1513 mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); 1514 1515 pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); 1516 } 1517 1518 static void mmc_blk_cqe_req_done(struct mmc_request *mrq) 1519 { 1520 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, 1521 brq.mrq); 1522 struct request *req = mmc_queue_req_to_req(mqrq); 1523 struct request_queue *q = req->q; 1524 struct mmc_queue *mq = q->queuedata; 1525 1526 /* 1527 * Block layer timeouts race with completions which means the normal 1528 * completion path cannot be used during recovery. 
1529 */ 1530 if (mq->in_recovery) 1531 mmc_blk_cqe_complete_rq(mq, req); 1532 else if (likely(!blk_should_fake_timeout(req->q))) 1533 blk_mq_complete_request(req); 1534 } 1535 1536 static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) 1537 { 1538 mrq->done = mmc_blk_cqe_req_done; 1539 mrq->recovery_notifier = mmc_cqe_recovery_notifier; 1540 1541 return mmc_cqe_start_req(host, mrq); 1542 } 1543 1544 static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, 1545 struct request *req) 1546 { 1547 struct mmc_blk_request *brq = &mqrq->brq; 1548 1549 memset(brq, 0, sizeof(*brq)); 1550 1551 brq->mrq.cmd = &brq->cmd; 1552 brq->mrq.tag = req->tag; 1553 1554 return &brq->mrq; 1555 } 1556 1557 static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) 1558 { 1559 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1560 struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); 1561 1562 mrq->cmd->opcode = MMC_SWITCH; 1563 mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 1564 (EXT_CSD_FLUSH_CACHE << 16) | 1565 (1 << 8) | 1566 EXT_CSD_CMD_SET_NORMAL; 1567 mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; 1568 1569 return mmc_blk_cqe_start_req(mq->card->host, mrq); 1570 } 1571 1572 static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1573 { 1574 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1575 struct mmc_host *host = mq->card->host; 1576 int err; 1577 1578 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 1579 mqrq->brq.mrq.done = mmc_blk_hsq_req_done; 1580 mmc_pre_req(host, &mqrq->brq.mrq); 1581 1582 err = mmc_cqe_start_req(host, &mqrq->brq.mrq); 1583 if (err) 1584 mmc_post_req(host, &mqrq->brq.mrq, err); 1585 1586 return err; 1587 } 1588 1589 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1590 { 1591 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1592 struct mmc_host *host = mq->card->host; 1593 1594 if (host->hsq_enabled) 1595 return mmc_blk_hsq_issue_rw_rq(mq, req); 1596 1597 mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); 1598 1599 return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); 1600 } 1601 1602 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 1603 struct mmc_card *card, 1604 int recovery_mode, 1605 struct mmc_queue *mq) 1606 { 1607 u32 readcmd, writecmd; 1608 struct mmc_blk_request *brq = &mqrq->brq; 1609 struct request *req = mmc_queue_req_to_req(mqrq); 1610 struct mmc_blk_data *md = mq->blkdata; 1611 bool do_rel_wr, do_data_tag; 1612 1613 mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag); 1614 1615 brq->mrq.cmd = &brq->cmd; 1616 1617 brq->cmd.arg = blk_rq_pos(req); 1618 if (!mmc_card_blockaddr(card)) 1619 brq->cmd.arg <<= 9; 1620 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 1621 1622 if (brq->data.blocks > 1 || do_rel_wr) { 1623 /* SPI multiblock writes terminate using a special 1624 * token, not a STOP_TRANSMISSION request. 1625 */ 1626 if (!mmc_host_is_spi(card->host) || 1627 rq_data_dir(req) == READ) 1628 brq->mrq.stop = &brq->stop; 1629 readcmd = MMC_READ_MULTIPLE_BLOCK; 1630 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 1631 } else { 1632 brq->mrq.stop = NULL; 1633 readcmd = MMC_READ_SINGLE_BLOCK; 1634 writecmd = MMC_WRITE_BLOCK; 1635 } 1636 brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; 1637 1638 /* 1639 * Pre-defined multi-block transfers are preferable to 1640 * open ended-ones (and necessary for reliable writes). 
1641 * However, it is not sufficient to just send CMD23, 1642 * and avoid the final CMD12, as on an error condition 1643 * CMD12 (stop) needs to be sent anyway. This, coupled 1644 * with Auto-CMD23 enhancements provided by some 1645 * hosts, means that the complexity of dealing 1646 * with this is best left to the host. If CMD23 is 1647 * supported by card and host, we'll fill sbc in and let 1648 * the host deal with handling it correctly. This means 1649 * that for hosts that don't expose MMC_CAP_CMD23, no 1650 * change of behavior will be observed. 1651 * 1652 * N.B: Some MMC cards experience perf degradation. 1653 * We'll avoid using CMD23-bounded multiblock writes for 1654 * these, while retaining features like reliable writes. 1655 */ 1656 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && 1657 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || 1658 do_data_tag)) { 1659 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1660 brq->sbc.arg = brq->data.blocks | 1661 (do_rel_wr ? (1 << 31) : 0) | 1662 (do_data_tag ? (1 << 29) : 0); 1663 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1664 brq->mrq.sbc = &brq->sbc; 1665 } 1666 } 1667 1668 #define MMC_MAX_RETRIES 5 1669 #define MMC_DATA_RETRIES 2 1670 #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) 1671 1672 static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) 1673 { 1674 struct mmc_command cmd = { 1675 .opcode = MMC_STOP_TRANSMISSION, 1676 .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, 1677 /* Some hosts wait for busy anyway, so provide a busy timeout */ 1678 .busy_timeout = timeout, 1679 }; 1680 1681 return mmc_wait_for_cmd(card->host, &cmd, 5); 1682 } 1683 1684 static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) 1685 { 1686 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1687 struct mmc_blk_request *brq = &mqrq->brq; 1688 unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); 1689 int err; 1690 1691 mmc_retune_hold_now(card->host); 1692 1693 mmc_blk_send_stop(card, timeout); 1694 1695 err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO); 1696 1697 mmc_retune_release(card->host); 1698 1699 return err; 1700 } 1701 1702 #define MMC_READ_SINGLE_RETRIES 2 1703 1704 /* Single (native) sector read during recovery */ 1705 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) 1706 { 1707 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1708 struct mmc_request *mrq = &mqrq->brq.mrq; 1709 struct mmc_card *card = mq->card; 1710 struct mmc_host *host = card->host; 1711 blk_status_t error = BLK_STS_OK; 1712 size_t bytes_per_read = queue_physical_block_size(mq->queue); 1713 1714 do { 1715 u32 status; 1716 int err; 1717 int retries = 0; 1718 1719 while (retries++ <= MMC_READ_SINGLE_RETRIES) { 1720 mmc_blk_rw_rq_prep(mqrq, card, 1, mq); 1721 1722 mmc_wait_for_req(host, mrq); 1723 1724 err = mmc_send_status(card, &status); 1725 if (err) 1726 goto error_exit; 1727 1728 if (!mmc_host_is_spi(host) && 1729 !mmc_ready_for_data(status)) { 1730 err = mmc_blk_fix_state(card, req); 1731 if (err) 1732 goto error_exit; 1733 } 1734 1735 if (!mrq->cmd->error) 1736 break; 1737 } 1738 1739 if (mrq->cmd->error || 1740 mrq->data->error || 1741 (!mmc_host_is_spi(host) && 1742 (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) 1743 error = BLK_STS_IOERR; 1744 else 1745 error = BLK_STS_OK; 1746 1747 } while (blk_update_request(req, error, bytes_per_read)); 1748 1749 return; 1750 1751 error_exit: 1752 mrq->data->bytes_xfered = 0; 1753 blk_update_request(req, 
BLK_STS_IOERR, bytes_per_read); 1754 /* Let it try the remaining request again */ 1755 if (mqrq->retries > MMC_MAX_RETRIES - 1) 1756 mqrq->retries = MMC_MAX_RETRIES - 1; 1757 } 1758 1759 static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) 1760 { 1761 return !!brq->mrq.sbc; 1762 } 1763 1764 static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) 1765 { 1766 return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; 1767 } 1768 1769 /* 1770 * Check for errors the host controller driver might not have seen such as 1771 * response mode errors or invalid card state. 1772 */ 1773 static bool mmc_blk_status_error(struct request *req, u32 status) 1774 { 1775 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1776 struct mmc_blk_request *brq = &mqrq->brq; 1777 struct mmc_queue *mq = req->q->queuedata; 1778 u32 stop_err_bits; 1779 1780 if (mmc_host_is_spi(mq->card->host)) 1781 return false; 1782 1783 stop_err_bits = mmc_blk_stop_err_bits(brq); 1784 1785 return brq->cmd.resp[0] & CMD_ERRORS || 1786 brq->stop.resp[0] & stop_err_bits || 1787 status & stop_err_bits || 1788 (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); 1789 } 1790 1791 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) 1792 { 1793 return !brq->sbc.error && !brq->cmd.error && 1794 !(brq->cmd.resp[0] & CMD_ERRORS); 1795 } 1796 1797 /* 1798 * Requests are completed by mmc_blk_mq_complete_rq() which sets simple 1799 * policy: 1800 * 1. A request that has transferred at least some data is considered 1801 * successful and will be requeued if there is remaining data to 1802 * transfer. 1803 * 2. Otherwise the number of retries is incremented and the request 1804 * will be requeued if there are remaining retries. 1805 * 3. Otherwise the request will be errored out. 1806 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and 1807 * mqrq->retries. So there are only 4 possible actions here: 1808 * 1. do not accept the bytes_xfered value i.e. set it to zero 1809 * 2. change mqrq->retries to determine the number of retries 1810 * 3. try to reset the card 1811 * 4. read one sector at a time 1812 */ 1813 static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) 1814 { 1815 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1816 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1817 struct mmc_blk_request *brq = &mqrq->brq; 1818 struct mmc_blk_data *md = mq->blkdata; 1819 struct mmc_card *card = mq->card; 1820 u32 status; 1821 u32 blocks; 1822 int err; 1823 1824 /* 1825 * Some errors the host driver might not have seen. Set the number of 1826 * bytes transferred to zero in that case. 1827 */ 1828 err = __mmc_send_status(card, &status, 0); 1829 if (err || mmc_blk_status_error(req, status)) 1830 brq->data.bytes_xfered = 0; 1831 1832 mmc_retune_release(card->host); 1833 1834 /* 1835 * Try again to get the status. This also provides an opportunity for 1836 * re-tuning. 1837 */ 1838 if (err) 1839 err = __mmc_send_status(card, &status, 0); 1840 1841 /* 1842 * Nothing more to do after the number of bytes transferred has been 1843 * updated and there is no card. 1844 */ 1845 if (err && mmc_detect_card_removed(card->host)) 1846 return; 1847 1848 /* Try to get back to "tran" state */ 1849 if (!mmc_host_is_spi(mq->card->host) && 1850 (err || !mmc_ready_for_data(status))) 1851 err = mmc_blk_fix_state(mq->card, req); 1852 1853 /* 1854 * Special case for SD cards where the card might record the number of 1855 * blocks written. 
1856 */ 1857 if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && 1858 rq_data_dir(req) == WRITE) { 1859 if (mmc_sd_num_wr_blocks(card, &blocks)) 1860 brq->data.bytes_xfered = 0; 1861 else 1862 brq->data.bytes_xfered = blocks << 9; 1863 } 1864 1865 /* Reset if the card is in a bad state */ 1866 if (!mmc_host_is_spi(mq->card->host) && 1867 err && mmc_blk_reset(md, card->host, type)) { 1868 pr_err("%s: recovery failed!\n", req->q->disk->disk_name); 1869 mqrq->retries = MMC_NO_RETRIES; 1870 return; 1871 } 1872 1873 /* 1874 * If anything was done, just return and if there is anything remaining 1875 * on the request it will get requeued. 1876 */ 1877 if (brq->data.bytes_xfered) 1878 return; 1879 1880 /* Reset before last retry */ 1881 if (mqrq->retries + 1 == MMC_MAX_RETRIES && 1882 mmc_blk_reset(md, card->host, type)) 1883 return; 1884 1885 /* Command errors fail fast, so use all MMC_MAX_RETRIES */ 1886 if (brq->sbc.error || brq->cmd.error) 1887 return; 1888 1889 /* Reduce the remaining retries for data errors */ 1890 if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { 1891 mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; 1892 return; 1893 } 1894 1895 if (rq_data_dir(req) == READ && brq->data.blocks > 1896 queue_physical_block_size(mq->queue) >> 9) { 1897 /* Read one (native) sector at a time */ 1898 mmc_blk_read_single(mq, req); 1899 return; 1900 } 1901 } 1902 1903 static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) 1904 { 1905 mmc_blk_eval_resp_error(brq); 1906 1907 return brq->sbc.error || brq->cmd.error || brq->stop.error || 1908 brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; 1909 } 1910 1911 static int mmc_spi_err_check(struct mmc_card *card) 1912 { 1913 u32 status = 0; 1914 int err; 1915 1916 /* 1917 * SPI does not have a TRAN state we have to wait on, instead the 1918 * card is ready again when it no longer holds the line LOW. 1919 * We still have to ensure two things here before we know the write 1920 * was successful: 1921 * 1. The card has not disconnected during busy and we actually read our 1922 * own pull-up, thinking it was still connected, so ensure it 1923 * still responds. 1924 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a 1925 * just reconnected card after being disconnected during busy. 1926 */ 1927 err = __mmc_send_status(card, &status, 0); 1928 if (err) 1929 return err; 1930 /* All R1 and R2 bits of SPI are errors in our case */ 1931 if (status) 1932 return -EIO; 1933 return 0; 1934 } 1935 1936 static int mmc_blk_busy_cb(void *cb_data, bool *busy) 1937 { 1938 struct mmc_blk_busy_data *data = cb_data; 1939 u32 status = 0; 1940 int err; 1941 1942 err = mmc_send_status(data->card, &status); 1943 if (err) 1944 return err; 1945 1946 /* Accumulate response error bits. 
*/ 1947 data->status |= status; 1948 1949 *busy = !mmc_ready_for_data(status); 1950 return 0; 1951 } 1952 1953 static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) 1954 { 1955 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1956 struct mmc_blk_busy_data cb_data; 1957 int err; 1958 1959 if (rq_data_dir(req) == READ) 1960 return 0; 1961 1962 if (mmc_host_is_spi(card->host)) { 1963 err = mmc_spi_err_check(card); 1964 if (err) 1965 mqrq->brq.data.bytes_xfered = 0; 1966 return err; 1967 } 1968 1969 cb_data.card = card; 1970 cb_data.status = 0; 1971 err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS, 1972 &mmc_blk_busy_cb, &cb_data); 1973 1974 /* 1975 * Do not assume data transferred correctly if there are any error bits 1976 * set. 1977 */ 1978 if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) { 1979 mqrq->brq.data.bytes_xfered = 0; 1980 err = err ? err : -EIO; 1981 } 1982 1983 /* Copy the exception bit so it will be seen later on */ 1984 if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) 1985 mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; 1986 1987 return err; 1988 } 1989 1990 static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, 1991 struct request *req) 1992 { 1993 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1994 1995 mmc_blk_reset_success(mq->blkdata, type); 1996 } 1997 1998 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) 1999 { 2000 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2001 unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; 2002 2003 if (nr_bytes) { 2004 if (blk_update_request(req, BLK_STS_OK, nr_bytes)) 2005 blk_mq_requeue_request(req, true); 2006 else 2007 __blk_mq_end_request(req, BLK_STS_OK); 2008 } else if (!blk_rq_bytes(req)) { 2009 __blk_mq_end_request(req, BLK_STS_IOERR); 2010 } else if (mqrq->retries++ < MMC_MAX_RETRIES) { 2011 blk_mq_requeue_request(req, true); 2012 } else { 2013 if (mmc_card_removed(mq->card)) 2014 req->rq_flags |= RQF_QUIET; 2015 blk_mq_end_request(req, BLK_STS_IOERR); 2016 } 2017 } 2018 2019 static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, 2020 struct mmc_queue_req *mqrq) 2021 { 2022 return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && 2023 (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || 2024 mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); 2025 } 2026 2027 static void mmc_blk_urgent_bkops(struct mmc_queue *mq, 2028 struct mmc_queue_req *mqrq) 2029 { 2030 if (mmc_blk_urgent_bkops_needed(mq, mqrq)) 2031 mmc_run_bkops(mq->card); 2032 } 2033 2034 static void mmc_blk_hsq_req_done(struct mmc_request *mrq) 2035 { 2036 struct mmc_queue_req *mqrq = 2037 container_of(mrq, struct mmc_queue_req, brq.mrq); 2038 struct request *req = mmc_queue_req_to_req(mqrq); 2039 struct request_queue *q = req->q; 2040 struct mmc_queue *mq = q->queuedata; 2041 struct mmc_host *host = mq->card->host; 2042 unsigned long flags; 2043 2044 if (mmc_blk_rq_error(&mqrq->brq) || 2045 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2046 spin_lock_irqsave(&mq->lock, flags); 2047 mq->recovery_needed = true; 2048 mq->recovery_req = req; 2049 spin_unlock_irqrestore(&mq->lock, flags); 2050 2051 host->cqe_ops->cqe_recovery_start(host); 2052 2053 schedule_work(&mq->recovery_work); 2054 return; 2055 } 2056 2057 mmc_blk_rw_reset_success(mq, req); 2058 2059 /* 2060 * Block layer timeouts race with completions which means the normal 2061 * completion path cannot be used during recovery. 
2062 */ 2063 if (mq->in_recovery) 2064 mmc_blk_cqe_complete_rq(mq, req); 2065 else if (likely(!blk_should_fake_timeout(req->q))) 2066 blk_mq_complete_request(req); 2067 } 2068 2069 void mmc_blk_mq_complete(struct request *req) 2070 { 2071 struct mmc_queue *mq = req->q->queuedata; 2072 struct mmc_host *host = mq->card->host; 2073 2074 if (host->cqe_enabled) 2075 mmc_blk_cqe_complete_rq(mq, req); 2076 else if (likely(!blk_should_fake_timeout(req->q))) 2077 mmc_blk_mq_complete_rq(mq, req); 2078 } 2079 2080 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, 2081 struct request *req) 2082 { 2083 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2084 struct mmc_host *host = mq->card->host; 2085 2086 if (mmc_blk_rq_error(&mqrq->brq) || 2087 mmc_blk_card_busy(mq->card, req)) { 2088 mmc_blk_mq_rw_recovery(mq, req); 2089 } else { 2090 mmc_blk_rw_reset_success(mq, req); 2091 mmc_retune_release(host); 2092 } 2093 2094 mmc_blk_urgent_bkops(mq, mqrq); 2095 } 2096 2097 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) 2098 { 2099 unsigned long flags; 2100 bool put_card; 2101 2102 spin_lock_irqsave(&mq->lock, flags); 2103 2104 mq->in_flight[mmc_issue_type(mq, req)] -= 1; 2105 2106 put_card = (mmc_tot_in_flight(mq) == 0); 2107 2108 spin_unlock_irqrestore(&mq->lock, flags); 2109 2110 if (put_card) 2111 mmc_put_card(mq->card, &mq->ctx); 2112 } 2113 2114 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, 2115 bool can_sleep) 2116 { 2117 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2118 struct mmc_request *mrq = &mqrq->brq.mrq; 2119 struct mmc_host *host = mq->card->host; 2120 2121 mmc_post_req(host, mrq, 0); 2122 2123 /* 2124 * Block layer timeouts race with completions which means the normal 2125 * completion path cannot be used during recovery. 
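* In the non-recovery case, can_sleep selects between completing the request directly in this context via blk_mq_complete_request_direct() and deferring to blk_mq_complete_request().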
2126 */ 2127 if (mq->in_recovery) { 2128 mmc_blk_mq_complete_rq(mq, req); 2129 } else if (likely(!blk_should_fake_timeout(req->q))) { 2130 if (can_sleep) 2131 blk_mq_complete_request_direct(req, mmc_blk_mq_complete); 2132 else 2133 blk_mq_complete_request(req); 2134 } 2135 2136 mmc_blk_mq_dec_in_flight(mq, req); 2137 } 2138 2139 void mmc_blk_mq_recovery(struct mmc_queue *mq) 2140 { 2141 struct request *req = mq->recovery_req; 2142 struct mmc_host *host = mq->card->host; 2143 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2144 2145 mq->recovery_req = NULL; 2146 mq->rw_wait = false; 2147 2148 if (mmc_blk_rq_error(&mqrq->brq)) { 2149 mmc_retune_hold_now(host); 2150 mmc_blk_mq_rw_recovery(mq, req); 2151 } 2152 2153 mmc_blk_urgent_bkops(mq, mqrq); 2154 2155 mmc_blk_mq_post_req(mq, req, true); 2156 } 2157 2158 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, 2159 struct request **prev_req) 2160 { 2161 if (mmc_host_done_complete(mq->card->host)) 2162 return; 2163 2164 mutex_lock(&mq->complete_lock); 2165 2166 if (!mq->complete_req) 2167 goto out_unlock; 2168 2169 mmc_blk_mq_poll_completion(mq, mq->complete_req); 2170 2171 if (prev_req) 2172 *prev_req = mq->complete_req; 2173 else 2174 mmc_blk_mq_post_req(mq, mq->complete_req, true); 2175 2176 mq->complete_req = NULL; 2177 2178 out_unlock: 2179 mutex_unlock(&mq->complete_lock); 2180 } 2181 2182 void mmc_blk_mq_complete_work(struct work_struct *work) 2183 { 2184 struct mmc_queue *mq = container_of(work, struct mmc_queue, 2185 complete_work); 2186 2187 mmc_blk_mq_complete_prev_req(mq, NULL); 2188 } 2189 2190 static void mmc_blk_mq_req_done(struct mmc_request *mrq) 2191 { 2192 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, 2193 brq.mrq); 2194 struct request *req = mmc_queue_req_to_req(mqrq); 2195 struct request_queue *q = req->q; 2196 struct mmc_queue *mq = q->queuedata; 2197 struct mmc_host *host = mq->card->host; 2198 unsigned long flags; 2199 2200 if (!mmc_host_done_complete(host)) { 2201 bool waiting; 2202 2203 /* 2204 * We cannot complete the request in this context, so record 2205 * that there is a request to complete, and that a following 2206 * request does not need to wait (although it does need to 2207 * complete complete_req first). 2208 */ 2209 spin_lock_irqsave(&mq->lock, flags); 2210 mq->complete_req = req; 2211 mq->rw_wait = false; 2212 waiting = mq->waiting; 2213 spin_unlock_irqrestore(&mq->lock, flags); 2214 2215 /* 2216 * If 'waiting' then the waiting task will complete this 2217 * request, otherwise queue a work to do it. Note that 2218 * complete_work may still race with the dispatch of a following 2219 * request. 
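* Both paths end up in mmc_blk_mq_complete_prev_req(), which serialises them under mq->complete_lock.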
2220 */ 2221 if (waiting) 2222 wake_up(&mq->wait); 2223 else 2224 queue_work(mq->card->complete_wq, &mq->complete_work); 2225 2226 return; 2227 } 2228 2229 /* Take the recovery path for errors or urgent background operations */ 2230 if (mmc_blk_rq_error(&mqrq->brq) || 2231 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2232 spin_lock_irqsave(&mq->lock, flags); 2233 mq->recovery_needed = true; 2234 mq->recovery_req = req; 2235 spin_unlock_irqrestore(&mq->lock, flags); 2236 wake_up(&mq->wait); 2237 schedule_work(&mq->recovery_work); 2238 return; 2239 } 2240 2241 mmc_blk_rw_reset_success(mq, req); 2242 2243 mq->rw_wait = false; 2244 wake_up(&mq->wait); 2245 2246 /* context unknown */ 2247 mmc_blk_mq_post_req(mq, req, false); 2248 } 2249 2250 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) 2251 { 2252 unsigned long flags; 2253 bool done; 2254 2255 /* 2256 * Wait while there is another request in progress, but not if recovery 2257 * is needed. Also indicate whether there is a request waiting to start. 2258 */ 2259 spin_lock_irqsave(&mq->lock, flags); 2260 if (mq->recovery_needed) { 2261 *err = -EBUSY; 2262 done = true; 2263 } else { 2264 done = !mq->rw_wait; 2265 } 2266 mq->waiting = !done; 2267 spin_unlock_irqrestore(&mq->lock, flags); 2268 2269 return done; 2270 } 2271 2272 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) 2273 { 2274 int err = 0; 2275 2276 wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); 2277 2278 /* Always complete the previous request if there is one */ 2279 mmc_blk_mq_complete_prev_req(mq, prev_req); 2280 2281 return err; 2282 } 2283 2284 static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, 2285 struct request *req) 2286 { 2287 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2288 struct mmc_host *host = mq->card->host; 2289 struct request *prev_req = NULL; 2290 int err = 0; 2291 2292 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 2293 2294 mqrq->brq.mrq.done = mmc_blk_mq_req_done; 2295 2296 mmc_pre_req(host, &mqrq->brq.mrq); 2297 2298 err = mmc_blk_rw_wait(mq, &prev_req); 2299 if (err) 2300 goto out_post_req; 2301 2302 mq->rw_wait = true; 2303 2304 err = mmc_start_request(host, &mqrq->brq.mrq); 2305 2306 if (prev_req) 2307 mmc_blk_mq_post_req(mq, prev_req, true); 2308 2309 if (err) 2310 mq->rw_wait = false; 2311 2312 /* Release re-tuning here where there is no synchronization required */ 2313 if (err || mmc_host_done_complete(host)) 2314 mmc_retune_release(host); 2315 2316 out_post_req: 2317 if (err) 2318 mmc_post_req(host, &mqrq->brq.mrq, err); 2319 2320 return err; 2321 } 2322 2323 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) 2324 { 2325 if (host->cqe_enabled) 2326 return host->cqe_ops->cqe_wait_for_idle(host); 2327 2328 return mmc_blk_rw_wait(mq, NULL); 2329 } 2330 2331 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) 2332 { 2333 struct mmc_blk_data *md = mq->blkdata; 2334 struct mmc_card *card = md->queue.card; 2335 struct mmc_host *host = card->host; 2336 int ret; 2337 2338 ret = mmc_blk_part_switch(card, md->part_type); 2339 if (ret) 2340 return MMC_REQ_FAILED_TO_START; 2341 2342 switch (mmc_issue_type(mq, req)) { 2343 case MMC_ISSUE_SYNC: 2344 ret = mmc_blk_wait_for_idle(mq, host); 2345 if (ret) 2346 return MMC_REQ_BUSY; 2347 switch (req_op(req)) { 2348 case REQ_OP_DRV_IN: 2349 case REQ_OP_DRV_OUT: 2350 mmc_blk_issue_drv_op(mq, req); 2351 break; 2352 case REQ_OP_DISCARD: 2353 mmc_blk_issue_discard_rq(mq, req); 2354 break; 2355 case REQ_OP_SECURE_ERASE: 
2356 mmc_blk_issue_secdiscard_rq(mq, req); 2357 break; 2358 case REQ_OP_WRITE_ZEROES: 2359 mmc_blk_issue_trim_rq(mq, req); 2360 break; 2361 case REQ_OP_FLUSH: 2362 mmc_blk_issue_flush(mq, req); 2363 break; 2364 default: 2365 WARN_ON_ONCE(1); 2366 return MMC_REQ_FAILED_TO_START; 2367 } 2368 return MMC_REQ_FINISHED; 2369 case MMC_ISSUE_DCMD: 2370 case MMC_ISSUE_ASYNC: 2371 switch (req_op(req)) { 2372 case REQ_OP_FLUSH: 2373 if (!mmc_cache_enabled(host)) { 2374 blk_mq_end_request(req, BLK_STS_OK); 2375 return MMC_REQ_FINISHED; 2376 } 2377 ret = mmc_blk_cqe_issue_flush(mq, req); 2378 break; 2379 case REQ_OP_READ: 2380 case REQ_OP_WRITE: 2381 if (host->cqe_enabled) 2382 ret = mmc_blk_cqe_issue_rw_rq(mq, req); 2383 else 2384 ret = mmc_blk_mq_issue_rw_rq(mq, req); 2385 break; 2386 default: 2387 WARN_ON_ONCE(1); 2388 ret = -EINVAL; 2389 } 2390 if (!ret) 2391 return MMC_REQ_STARTED; 2392 return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; 2393 default: 2394 WARN_ON_ONCE(1); 2395 return MMC_REQ_FAILED_TO_START; 2396 } 2397 } 2398 2399 static inline int mmc_blk_readonly(struct mmc_card *card) 2400 { 2401 return mmc_card_readonly(card) || 2402 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 2403 } 2404 2405 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, 2406 struct device *parent, 2407 sector_t size, 2408 bool default_ro, 2409 const char *subname, 2410 int area_type, 2411 unsigned int part_type) 2412 { 2413 struct mmc_blk_data *md; 2414 int devidx, ret; 2415 char cap_str[10]; 2416 bool cache_enabled = false; 2417 bool fua_enabled = false; 2418 2419 devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); 2420 if (devidx < 0) { 2421 /* 2422 * We get -ENOSPC because there are no more available 2423 * devidx values. The reason may be that either userspace hasn't yet 2424 * unmounted the partitions, which postpones mmc_blk_release() 2425 * from being called, or the device has more partitions than 2426 * we support. 2427 */ 2428 if (devidx == -ENOSPC) 2429 dev_err(mmc_dev(card->host), 2430 "no more device IDs available\n"); 2431 2432 return ERR_PTR(devidx); 2433 } 2434 2435 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); 2436 if (!md) { 2437 ret = -ENOMEM; 2438 goto out; 2439 } 2440 2441 md->area_type = area_type; 2442 2443 /* 2444 * Set the read-only status based on the supported commands 2445 * and the write protect switch. 2446 */ 2447 md->read_only = mmc_blk_readonly(card); 2448 2449 md->disk = mmc_init_queue(&md->queue, card); 2450 if (IS_ERR(md->disk)) { 2451 ret = PTR_ERR(md->disk); 2452 goto err_kfree; 2453 } 2454 2455 INIT_LIST_HEAD(&md->part); 2456 INIT_LIST_HEAD(&md->rpmbs); 2457 kref_init(&md->kref); 2458 2459 md->queue.blkdata = md; 2460 md->part_type = part_type; 2461 2462 md->disk->major = MMC_BLOCK_MAJOR; 2463 md->disk->minors = perdev_minors; 2464 md->disk->first_minor = devidx * perdev_minors; 2465 md->disk->fops = &mmc_bdops; 2466 md->disk->private_data = md; 2467 md->parent = parent; 2468 set_disk_ro(md->disk, md->read_only || default_ro); 2469 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) 2470 md->disk->flags |= GENHD_FL_NO_PART; 2471 2472 /* 2473 * As discussed on lkml, GENHD_FL_REMOVABLE should: 2474 * 2475 * - be set for removable media with permanent block devices 2476 * - be unset for removable block devices with permanent media 2477 * 2478 * Since MMC block devices clearly fall under the second 2479 * case, we do not set GENHD_FL_REMOVABLE.
Userspace 2480 * should use the block device creation/destruction hotplug 2481 * messages to tell when the card is present. 2482 */ 2483 2484 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 2485 "mmcblk%u%s", card->host->index, subname ? subname : ""); 2486 2487 set_capacity(md->disk, size); 2488 2489 if (mmc_host_cmd23(card->host)) { 2490 if ((mmc_card_mmc(card) && 2491 card->csd.mmca_vsn >= CSD_SPEC_VER_3) || 2492 (mmc_card_sd(card) && 2493 card->scr.cmds & SD_SCR_CMD23_SUPPORT)) 2494 md->flags |= MMC_BLK_CMD23; 2495 } 2496 2497 if (md->flags & MMC_BLK_CMD23 && 2498 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || 2499 card->ext_csd.rel_sectors)) { 2500 md->flags |= MMC_BLK_REL_WR; 2501 fua_enabled = true; 2502 cache_enabled = true; 2503 } 2504 if (mmc_cache_enabled(card->host)) 2505 cache_enabled = true; 2506 2507 blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled); 2508 2509 string_get_size((u64)size, 512, STRING_UNITS_2, 2510 cap_str, sizeof(cap_str)); 2511 pr_info("%s: %s %s %s %s\n", 2512 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 2513 cap_str, md->read_only ? "(ro)" : ""); 2514 2515 /* used in ->open, must be set before add_disk: */ 2516 if (area_type == MMC_BLK_DATA_AREA_MAIN) 2517 dev_set_drvdata(&card->dev, md); 2518 ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups); 2519 if (ret) 2520 goto err_put_disk; 2521 return md; 2522 2523 err_put_disk: 2524 put_disk(md->disk); 2525 blk_mq_free_tag_set(&md->queue.tag_set); 2526 err_kfree: 2527 kfree(md); 2528 out: 2529 ida_simple_remove(&mmc_blk_ida, devidx); 2530 return ERR_PTR(ret); 2531 } 2532 2533 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 2534 { 2535 sector_t size; 2536 2537 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 2538 /* 2539 * The EXT_CSD sector count is in number of 512 byte 2540 * sectors. 2541 */ 2542 size = card->ext_csd.sectors; 2543 } else { 2544 /* 2545 * The CSD capacity field is in units of read_blkbits. 2546 * set_capacity takes units of 512 bytes. 2547 */ 2548 size = (typeof(sector_t))card->csd.capacity 2549 << (card->csd.read_blkbits - 9); 2550 } 2551 2552 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 2553 MMC_BLK_DATA_AREA_MAIN, 0); 2554 } 2555 2556 static int mmc_blk_alloc_part(struct mmc_card *card, 2557 struct mmc_blk_data *md, 2558 unsigned int part_type, 2559 sector_t size, 2560 bool default_ro, 2561 const char *subname, 2562 int area_type) 2563 { 2564 struct mmc_blk_data *part_md; 2565 2566 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 2567 subname, area_type, part_type); 2568 if (IS_ERR(part_md)) 2569 return PTR_ERR(part_md); 2570 list_add(&part_md->part, &md->part); 2571 2572 return 0; 2573 } 2574 2575 /** 2576 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev 2577 * @filp: the character device file 2578 * @cmd: the ioctl() command 2579 * @arg: the argument from userspace 2580 * 2581 * This will essentially just redirect the ioctl()s coming in over to 2582 * the main block device spawning the RPMB character device.
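* Only MMC_IOC_CMD and MMC_IOC_MULTI_CMD are handled; any other command returns -EINVAL.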
2583 */ 2584 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, 2585 unsigned long arg) 2586 { 2587 struct mmc_rpmb_data *rpmb = filp->private_data; 2588 int ret; 2589 2590 switch (cmd) { 2591 case MMC_IOC_CMD: 2592 ret = mmc_blk_ioctl_cmd(rpmb->md, 2593 (struct mmc_ioc_cmd __user *)arg, 2594 rpmb); 2595 break; 2596 case MMC_IOC_MULTI_CMD: 2597 ret = mmc_blk_ioctl_multi_cmd(rpmb->md, 2598 (struct mmc_ioc_multi_cmd __user *)arg, 2599 rpmb); 2600 break; 2601 default: 2602 ret = -EINVAL; 2603 break; 2604 } 2605 2606 return ret; 2607 } 2608 2609 #ifdef CONFIG_COMPAT 2610 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, 2611 unsigned long arg) 2612 { 2613 return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 2614 } 2615 #endif 2616 2617 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) 2618 { 2619 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2620 struct mmc_rpmb_data, chrdev); 2621 2622 get_device(&rpmb->dev); 2623 filp->private_data = rpmb; 2624 mmc_blk_get(rpmb->md->disk); 2625 2626 return nonseekable_open(inode, filp); 2627 } 2628 2629 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) 2630 { 2631 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2632 struct mmc_rpmb_data, chrdev); 2633 2634 mmc_blk_put(rpmb->md); 2635 put_device(&rpmb->dev); 2636 2637 return 0; 2638 } 2639 2640 static const struct file_operations mmc_rpmb_fileops = { 2641 .release = mmc_rpmb_chrdev_release, 2642 .open = mmc_rpmb_chrdev_open, 2643 .owner = THIS_MODULE, 2644 .llseek = no_llseek, 2645 .unlocked_ioctl = mmc_rpmb_ioctl, 2646 #ifdef CONFIG_COMPAT 2647 .compat_ioctl = mmc_rpmb_ioctl_compat, 2648 #endif 2649 }; 2650 2651 static void mmc_blk_rpmb_device_release(struct device *dev) 2652 { 2653 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2654 2655 ida_simple_remove(&mmc_rpmb_ida, rpmb->id); 2656 kfree(rpmb); 2657 } 2658 2659 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, 2660 struct mmc_blk_data *md, 2661 unsigned int part_index, 2662 sector_t size, 2663 const char *subname) 2664 { 2665 int devidx, ret; 2666 char rpmb_name[DISK_NAME_LEN]; 2667 char cap_str[10]; 2668 struct mmc_rpmb_data *rpmb; 2669 2670 /* This creates the minor number for the RPMB char device */ 2671 devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); 2672 if (devidx < 0) 2673 return devidx; 2674 2675 rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); 2676 if (!rpmb) { 2677 ida_simple_remove(&mmc_rpmb_ida, devidx); 2678 return -ENOMEM; 2679 } 2680 2681 snprintf(rpmb_name, sizeof(rpmb_name), 2682 "mmcblk%u%s", card->host->index, subname ? 
subname : ""); 2683 2684 rpmb->id = devidx; 2685 rpmb->part_index = part_index; 2686 rpmb->dev.init_name = rpmb_name; 2687 rpmb->dev.bus = &mmc_rpmb_bus_type; 2688 rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); 2689 rpmb->dev.parent = &card->dev; 2690 rpmb->dev.release = mmc_blk_rpmb_device_release; 2691 device_initialize(&rpmb->dev); 2692 dev_set_drvdata(&rpmb->dev, rpmb); 2693 rpmb->md = md; 2694 2695 cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); 2696 rpmb->chrdev.owner = THIS_MODULE; 2697 ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); 2698 if (ret) { 2699 pr_err("%s: could not add character device\n", rpmb_name); 2700 goto out_put_device; 2701 } 2702 2703 list_add(&rpmb->node, &md->rpmbs); 2704 2705 string_get_size((u64)size, 512, STRING_UNITS_2, 2706 cap_str, sizeof(cap_str)); 2707 2708 pr_info("%s: %s %s %s, chardev (%d:%d)\n", 2709 rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str, 2710 MAJOR(mmc_rpmb_devt), rpmb->id); 2711 2712 return 0; 2713 2714 out_put_device: 2715 put_device(&rpmb->dev); 2716 return ret; 2717 } 2718 2719 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) 2720 2721 { 2722 cdev_device_del(&rpmb->chrdev, &rpmb->dev); 2723 put_device(&rpmb->dev); 2724 } 2725 2726 /* MMC physical partitions consist of two boot partitions and 2727 * up to four general purpose partitions. 2728 * For each partition enabled in EXT_CSD a block device will be allocated 2729 * to provide access to the partition. 2730 */ 2731 2732 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) 2733 { 2734 int idx, ret; 2735 2736 if (!mmc_card_mmc(card)) 2737 return 0; 2738 2739 for (idx = 0; idx < card->nr_parts; idx++) { 2740 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { 2741 /* 2742 * RPMB partitions do not provide block access; they 2743 * are only accessed using ioctl()s. Thus create 2744 * special RPMB block devices that do not have a 2745 * backing block queue for them. 2746 */ 2747 ret = mmc_blk_alloc_rpmb_part(card, md, 2748 card->part[idx].part_cfg, 2749 card->part[idx].size >> 9, 2750 card->part[idx].name); 2751 if (ret) 2752 return ret; 2753 } else if (card->part[idx].size) { 2754 ret = mmc_blk_alloc_part(card, md, 2755 card->part[idx].part_cfg, 2756 card->part[idx].size >> 9, 2757 card->part[idx].force_ro, 2758 card->part[idx].name, 2759 card->part[idx].area_type); 2760 if (ret) 2761 return ret; 2762 } 2763 } 2764 2765 return 0; 2766 } 2767 2768 static void mmc_blk_remove_req(struct mmc_blk_data *md) 2769 { 2770 /* 2771 * Flush remaining requests and free queues. It is freeing the queue 2772 * that stops new requests from being accepted.
2773 */ 2774 del_gendisk(md->disk); 2775 mmc_cleanup_queue(&md->queue); 2776 mmc_blk_put(md); 2777 } 2778 2779 static void mmc_blk_remove_parts(struct mmc_card *card, 2780 struct mmc_blk_data *md) 2781 { 2782 struct list_head *pos, *q; 2783 struct mmc_blk_data *part_md; 2784 struct mmc_rpmb_data *rpmb; 2785 2786 /* Remove RPMB partitions */ 2787 list_for_each_safe(pos, q, &md->rpmbs) { 2788 rpmb = list_entry(pos, struct mmc_rpmb_data, node); 2789 list_del(pos); 2790 mmc_blk_remove_rpmb_part(rpmb); 2791 } 2792 /* Remove block partitions */ 2793 list_for_each_safe(pos, q, &md->part) { 2794 part_md = list_entry(pos, struct mmc_blk_data, part); 2795 list_del(pos); 2796 mmc_blk_remove_req(part_md); 2797 } 2798 } 2799 2800 #ifdef CONFIG_DEBUG_FS 2801 2802 static int mmc_dbg_card_status_get(void *data, u64 *val) 2803 { 2804 struct mmc_card *card = data; 2805 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 2806 struct mmc_queue *mq = &md->queue; 2807 struct request *req; 2808 int ret; 2809 2810 /* Ask the block layer about the card status */ 2811 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 2812 if (IS_ERR(req)) 2813 return PTR_ERR(req); 2814 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; 2815 blk_execute_rq(req, false); 2816 ret = req_to_mmc_queue_req(req)->drv_op_result; 2817 if (ret >= 0) { 2818 *val = ret; 2819 ret = 0; 2820 } 2821 blk_mq_free_request(req); 2822 2823 return ret; 2824 } 2825 DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, 2826 NULL, "%08llx\n"); 2827 2828 /* That is two digits * 512 + 1 for newline */ 2829 #define EXT_CSD_STR_LEN 1025 2830 2831 static int mmc_ext_csd_open(struct inode *inode, struct file *filp) 2832 { 2833 struct mmc_card *card = inode->i_private; 2834 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 2835 struct mmc_queue *mq = &md->queue; 2836 struct request *req; 2837 char *buf; 2838 ssize_t n = 0; 2839 u8 *ext_csd; 2840 int err, i; 2841 2842 buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); 2843 if (!buf) 2844 return -ENOMEM; 2845 2846 /* Ask the block layer for the EXT CSD */ 2847 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 2848 if (IS_ERR(req)) { 2849 err = PTR_ERR(req); 2850 goto out_free; 2851 } 2852 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; 2853 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; 2854 blk_execute_rq(req, false); 2855 err = req_to_mmc_queue_req(req)->drv_op_result; 2856 blk_mq_free_request(req); 2857 if (err) { 2858 pr_err("FAILED %d\n", err); 2859 goto out_free; 2860 } 2861 2862 for (i = 0; i < 512; i++) 2863 n += sprintf(buf + n, "%02x", ext_csd[i]); 2864 n += sprintf(buf + n, "\n"); 2865 2866 if (n != EXT_CSD_STR_LEN) { 2867 err = -EINVAL; 2868 kfree(ext_csd); 2869 goto out_free; 2870 } 2871 2872 filp->private_data = buf; 2873 kfree(ext_csd); 2874 return 0; 2875 2876 out_free: 2877 kfree(buf); 2878 return err; 2879 } 2880 2881 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, 2882 size_t cnt, loff_t *ppos) 2883 { 2884 char *buf = filp->private_data; 2885 2886 return simple_read_from_buffer(ubuf, cnt, ppos, 2887 buf, EXT_CSD_STR_LEN); 2888 } 2889 2890 static int mmc_ext_csd_release(struct inode *inode, struct file *file) 2891 { 2892 kfree(file->private_data); 2893 return 0; 2894 } 2895 2896 static const struct file_operations mmc_dbg_ext_csd_fops = { 2897 .open = mmc_ext_csd_open, 2898 .read = mmc_ext_csd_read, 2899 .release = mmc_ext_csd_release, 2900 .llseek = default_llseek, 2901 }; 2902 2903 static int 
mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) 2904 { 2905 struct dentry *root; 2906 2907 if (!card->debugfs_root) 2908 return 0; 2909 2910 root = card->debugfs_root; 2911 2912 if (mmc_card_mmc(card) || mmc_card_sd(card)) { 2913 md->status_dentry = 2914 debugfs_create_file_unsafe("status", 0400, root, 2915 card, 2916 &mmc_dbg_card_status_fops); 2917 if (!md->status_dentry) 2918 return -EIO; 2919 } 2920 2921 if (mmc_card_mmc(card)) { 2922 md->ext_csd_dentry = 2923 debugfs_create_file("ext_csd", S_IRUSR, root, card, 2924 &mmc_dbg_ext_csd_fops); 2925 if (!md->ext_csd_dentry) 2926 return -EIO; 2927 } 2928 2929 return 0; 2930 } 2931 2932 static void mmc_blk_remove_debugfs(struct mmc_card *card, 2933 struct mmc_blk_data *md) 2934 { 2935 if (!card->debugfs_root) 2936 return; 2937 2938 if (!IS_ERR_OR_NULL(md->status_dentry)) { 2939 debugfs_remove(md->status_dentry); 2940 md->status_dentry = NULL; 2941 } 2942 2943 if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { 2944 debugfs_remove(md->ext_csd_dentry); 2945 md->ext_csd_dentry = NULL; 2946 } 2947 } 2948 2949 #else 2950 2951 static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) 2952 { 2953 return 0; 2954 } 2955 2956 static void mmc_blk_remove_debugfs(struct mmc_card *card, 2957 struct mmc_blk_data *md) 2958 { 2959 } 2960 2961 #endif /* CONFIG_DEBUG_FS */ 2962 2963 static int mmc_blk_probe(struct mmc_card *card) 2964 { 2965 struct mmc_blk_data *md; 2966 int ret = 0; 2967 2968 /* 2969 * Check that the card supports the command class(es) we need. 2970 */ 2971 if (!(card->csd.cmdclass & CCC_BLOCK_READ)) 2972 return -ENODEV; 2973 2974 mmc_fixup_device(card, mmc_blk_fixups); 2975 2976 card->complete_wq = alloc_workqueue("mmc_complete", 2977 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 2978 if (!card->complete_wq) { 2979 pr_err("Failed to create mmc completion workqueue"); 2980 return -ENOMEM; 2981 } 2982 2983 md = mmc_blk_alloc(card); 2984 if (IS_ERR(md)) { 2985 ret = PTR_ERR(md); 2986 goto out_free; 2987 } 2988 2989 ret = mmc_blk_alloc_parts(card, md); 2990 if (ret) 2991 goto out; 2992 2993 /* Add two debugfs entries */ 2994 mmc_blk_add_debugfs(card, md); 2995 2996 pm_runtime_set_autosuspend_delay(&card->dev, 3000); 2997 pm_runtime_use_autosuspend(&card->dev); 2998 2999 /* 3000 * Don't enable runtime PM for SD-combo cards here. Leave that 3001 * decision to be taken during the SDIO init sequence instead. 
3002 */ 3003 if (!mmc_card_sd_combo(card)) { 3004 pm_runtime_set_active(&card->dev); 3005 pm_runtime_enable(&card->dev); 3006 } 3007 3008 return 0; 3009 3010 out: 3011 mmc_blk_remove_parts(card, md); 3012 mmc_blk_remove_req(md); 3013 out_free: 3014 destroy_workqueue(card->complete_wq); 3015 return ret; 3016 } 3017 3018 static void mmc_blk_remove(struct mmc_card *card) 3019 { 3020 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3021 3022 mmc_blk_remove_debugfs(card, md); 3023 mmc_blk_remove_parts(card, md); 3024 pm_runtime_get_sync(&card->dev); 3025 if (md->part_curr != md->part_type) { 3026 mmc_claim_host(card->host); 3027 mmc_blk_part_switch(card, md->part_type); 3028 mmc_release_host(card->host); 3029 } 3030 if (!mmc_card_sd_combo(card)) 3031 pm_runtime_disable(&card->dev); 3032 pm_runtime_put_noidle(&card->dev); 3033 mmc_blk_remove_req(md); 3034 dev_set_drvdata(&card->dev, NULL); 3035 destroy_workqueue(card->complete_wq); 3036 } 3037 3038 static int _mmc_blk_suspend(struct mmc_card *card) 3039 { 3040 struct mmc_blk_data *part_md; 3041 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3042 3043 if (md) { 3044 mmc_queue_suspend(&md->queue); 3045 list_for_each_entry(part_md, &md->part, part) { 3046 mmc_queue_suspend(&part_md->queue); 3047 } 3048 } 3049 return 0; 3050 } 3051 3052 static void mmc_blk_shutdown(struct mmc_card *card) 3053 { 3054 _mmc_blk_suspend(card); 3055 } 3056 3057 #ifdef CONFIG_PM_SLEEP 3058 static int mmc_blk_suspend(struct device *dev) 3059 { 3060 struct mmc_card *card = mmc_dev_to_card(dev); 3061 3062 return _mmc_blk_suspend(card); 3063 } 3064 3065 static int mmc_blk_resume(struct device *dev) 3066 { 3067 struct mmc_blk_data *part_md; 3068 struct mmc_blk_data *md = dev_get_drvdata(dev); 3069 3070 if (md) { 3071 /* 3072 * Resume involves the card going into idle state, 3073 * so current partition is always the main one. 
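* part_curr is therefore reset to the default part_type before the queues are resumed.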
3074 */ 3075 md->part_curr = md->part_type; 3076 mmc_queue_resume(&md->queue); 3077 list_for_each_entry(part_md, &md->part, part) { 3078 mmc_queue_resume(&part_md->queue); 3079 } 3080 } 3081 return 0; 3082 } 3083 #endif 3084 3085 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); 3086 3087 static struct mmc_driver mmc_driver = { 3088 .drv = { 3089 .name = "mmcblk", 3090 .pm = &mmc_blk_pm_ops, 3091 }, 3092 .probe = mmc_blk_probe, 3093 .remove = mmc_blk_remove, 3094 .shutdown = mmc_blk_shutdown, 3095 }; 3096 3097 static int __init mmc_blk_init(void) 3098 { 3099 int res; 3100 3101 res = bus_register(&mmc_rpmb_bus_type); 3102 if (res < 0) { 3103 pr_err("mmcblk: could not register RPMB bus type\n"); 3104 return res; 3105 } 3106 res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); 3107 if (res < 0) { 3108 pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); 3109 goto out_bus_unreg; 3110 } 3111 3112 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 3113 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 3114 3115 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); 3116 3117 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3118 if (res) 3119 goto out_chrdev_unreg; 3120 3121 res = mmc_register_driver(&mmc_driver); 3122 if (res) 3123 goto out_blkdev_unreg; 3124 3125 return 0; 3126 3127 out_blkdev_unreg: 3128 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3129 out_chrdev_unreg: 3130 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3131 out_bus_unreg: 3132 bus_unregister(&mmc_rpmb_bus_type); 3133 return res; 3134 } 3135 3136 static void __exit mmc_blk_exit(void) 3137 { 3138 mmc_unregister_driver(&mmc_driver); 3139 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3140 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3141 bus_unregister(&mmc_rpmb_bus_type); 3142 } 3143 3144 module_init(mmc_blk_init); 3145 module_exit(mmc_blk_exit); 3146 3147 MODULE_LICENSE("GPL"); 3148 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 3149 3150