// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>
#include <linux/rpmb.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>
#include <linux/unaligned.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)

#define RPMB_FRAME_SIZE sizeof(struct rpmb_frame)
#define CHECK_SIZE_NEQ(val) ((val) != sizeof(struct rpmb_frame))
#define CHECK_SIZE_ALIGNED(val) IS_ALIGNED((val), sizeof(struct rpmb_frame))

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
#define MMC_BLK_PART_INVALID	UINT_MAX	/* Unknown partition active */
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static const struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @rdev: registered RPMB device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct rpmb_dev *rdev;
	struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static int mmc_blk_busy_cb(void *cb_data, bool *busy);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && !kref_get_unless_zero(&md->kref))
		md = NULL;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_kref_release(struct kref *ref)
{
	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
	int devidx;

	devidx = mmc_get_devidx(md->disk);
	ida_free(&mmc_blk_ida, devidx);

	mutex_lock(&open_lock);
	md->disk->private_data = NULL;
	mutex_unlock(&open_lock);

	put_disk(md->disk);
	kfree(md);
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	kref_put(&md->kref, mmc_blk_kref_release);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
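	/*
	 * mmc_blk_get() takes a reference on the mmc_blk_data; it is
	 * dropped with mmc_blk_put() before returning.
	 */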
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = sysfs_emit(buf, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}

static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		power_ro_lock_show, power_ro_lock_store);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = sysfs_emit(buf, "%d\n",
			 get_disk_ro(dev_to_disk(dev)) ^
			 md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set;

	if (kstrtoul(buf, 0, &set)) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);

static struct attribute *mmc_disk_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_ro_lock_until_next_power_on.attr,
	NULL,
};

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	umode_t mode = a->mode;

	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    md->queue.card->ext_csd.boot_ro_lockable) {
		mode = S_IRUGO;
		if (!(md->queue.card->ext_csd.boot_ro_lock &
				EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
			mode |= S_IWUSR;
	}

	mmc_blk_put(md);
	return mode;
}

static const struct attribute_group mmc_disk_attr_group = {
	.is_visible	= mmc_disk_attrs_is_visible,
	.attrs		= mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
	&mmc_disk_attr_group,
	NULL,
};

static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		if ((mode & BLK_OPEN_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	unsigned int flags;
#define MMC_BLK_IOC_DROP	BIT(0)	/* drop this mrq */
#define MMC_BLK_IOC_SBC		BIT(1)	/* use mrq.sbc */

	struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = memdup_user((void __user *)(unsigned long)
				 idata->ic.data_ptr, idata->buf_bytes);
	if (IS_ERR(idata->buf)) {
		err = PTR_ERR(idata->buf);
		goto idata_err;
	}

	return idata;

idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data **idatas, int i)
{
	struct mmc_command cmd = {}, sbc = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	bool r1b_resp;
	unsigned int busy_timeout_ms;
	int err;
	unsigned int target_part;
	struct mmc_blk_ioc_data *idata = idatas[i];
	struct mmc_blk_ioc_data *prev_idata = NULL;

	if (!card || !md || !idata)
		return -EINVAL;

	if (idata->flags & MMC_BLK_IOC_DROP)
		return 0;

	if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
		prev_idata = idatas[i - 1];

	/*
	 * The RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Else we just target the
	 * partition type for the block device the ioctl() was issued
	 * on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb || prev_idata) {
		sbc.opcode = MMC_SET_BLOCK_COUNT;
		/*
		 * We don't do any blockcount validation because the max size
		 * may be increased by a future standard. We just copy the
		 * 'Reliable Write' bit here.
		 */
		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
		if (prev_idata)
			sbc.arg = prev_idata->ic.arg;
		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		mrq.sbc = &sbc;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH))
		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

	/* If it's an R1B response we need some more preparations. */
	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
	if (r1b_resp)
		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);

	mmc_wait_for_req(card->host, &mrq);
	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

	if (prev_idata) {
		memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp));
		if (sbc.error) {
			dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
				__func__, sbc.error);
			return sbc.error;
		}
	}

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * Make sure to update CACHE_CTRL in case it was changed. The cache
	 * will get turned back on if the card is re-initialized, e.g.
	 * suspend/resume or hw reset in recovery.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
	    (cmd.opcode == MMC_SWITCH)) {
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

		card->ext_csd.cache_ctrl = value;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (mmc_host_is_spi(card->host)) {
		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
			return mmc_spi_err_check(card);
		return err;
	}

	/*
	 * Ensure RPMB accesses, writes and R1B responses are completed by
	 * polling with CMD13. Note that we usually don't need to poll when
	 * using HW busy detection, but here it's needed since some commands
	 * may indicate the error through the R1 status bits.
	 */
	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
		struct mmc_blk_busy_data cb_data = {
			.card = card,
		};

		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
					  &mmc_blk_busy_cb, &cb_data);

		idata->ic.response[0] = cb_data.status;
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl()s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_mq_free_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	unsigned int i, n;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	n = num_of_cmds;
	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			n = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl()s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = n;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < n && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_mq_free_request(req);

cmd_err:
	for (i = 0; i < n; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{
	struct mmc_blk_data *md;
	int ret;

	md = mmc_blk_get(disk);
	if (!md)
		return -EINVAL;

	if (md->queue.card)
		ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
	else
		ret = -ENODEV;

	mmc_blk_put(md);

	return ret;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
	.alternative_gpt_sector	= mmc_blk_alternative_gpt_sector,
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
	int ret = 0;

	if ((part_type & mask) == rpmb) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
	const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
	int ret = 0;

	if ((part_type & mask) == rpmb) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;
	u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
	unsigned int noio_flag;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = resp_sz;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	noio_flag = memalloc_noio_save();
	blocks = kmalloc(resp_sz, GFP_KERNEL);
	memalloc_noio_restore(noio_flag);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, resp_sz);

	mmc_wait_for_req(card->host, &mrq);

	if (mmc_card_ult_capacity(card)) {
		/*
		 * Normally, ACMD22 returns the number of written sectors as
		 * u32. SDUC, however, returns it as u64. This is not a
		 * superfluous requirement, because SDUC writes may exceed 2TB.
		 * For Linux mmc however, the previous write operation could
		 * not be more than the block layer limits, thus just make room
		 * for a u64 and cast the response back to u32.
		 */
		result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
	} else {
		result = ntohl(*blocks);
	}
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
	if (host->ios.clock)
		return host->ios.clock / 2000;

	/* How can there be no clock */
	WARN_ON_ONCE(1);
	return 100; /* 100 kHz is minimum possible value */
}

static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}

/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request; it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return 0;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq)
{
	struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data;
	int i;

	for (i = 1; i < mq_rq->ioc_count; i++) {
		if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
		    mmc_op_multi(idata[i]->ic.opcode)) {
			idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
			idata[i]->flags |= MMC_BLK_IOC_SBC;
		}
	}
}

/*
 * The non-block commands come back from the block layer after it has queued
 * and processed them with all other requests, and then they get issued in
 * this function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_ioc_data **idata;
	bool rpmb_ioctl;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				break;
		}

		mmc_blk_check_sbc(mq_rq);

		fallthrough;
	case MMC_DRV_OP_IOCTL_RPMB:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata, i);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (rpmb_ioctl)
			mmc_blk_part_switch(card, 0);
		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			mmc_cmdq_enable(card);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
				   int type, unsigned int erase_arg)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int nr;
	sector_t from;
	int err = 0;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_card_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 erase_arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 card->ext_csd.generic_cmd6_time);
		}
		if (!err)
			err = mmc_erase(card, from, nr, erase_arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))
		arg = SD_ERASE_ARG;

	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
					struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int nr, arg;
	sector_t from;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_card_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_card_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 card->ext_csd.generic_cmd6_time);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 card->ext_csd.generic_cmd6_time);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card->host);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10) [1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec [1] doesn't tell us whether we should also
	 * ignore that for predefined method.
	 * But per the spec [1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we could expect an out of range error
	 * in the response for the following CMD18/25. And if the argument of
	 * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
	 * we could also expect to get a -ETIMEDOUT or any error number from
	 * the host drivers due to a missing data response (for write)/data
	 * (for read), as the card will stop the data transfer by itself per
	 * the spec. So we only need to check R1_OUT_OF_RANGE for open-ending
	 * mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	mmc_crypto_prepare_req(mqrq);

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * MMC_DATA_PRIO.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * Some SD cards in SPI mode return a CRC error or even lock up
		 * completely when trying to read the last block using a
		 * multiblock read command.
		 */
		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
		     get_capacity(md->disk)))
			brq->data.blocks--;

		/*
		 * After a read error, we redo the request one (native) sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (recovery_mode)
			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}

#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct request_queue *q = req->q;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	unsigned long flags;
	bool put_card;
	int err;

	mmc_cqe_post_req(host, mrq);

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	else
		err = 0;

	if (err) {
		if (mqrq->retries++ < MMC_CQE_RETRIES)
			blk_mq_requeue_request(req, true);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mrq->data) {
		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (mq->in_recovery) {
		blk_mq_requeue_request(req, true);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	mmc_cqe_check_busy(mq);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}

void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	int err;

	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

	err = mmc_cqe_recovery(host);
	if (err)
		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}

static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done		= mmc_blk_cqe_req_done;
	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;

	return mmc_cqe_start_req(host, mrq);
}

static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(*brq));

	brq->mrq.cmd = &brq->cmd;
	brq->mrq.tag = req->tag;

	return &brq->mrq;
}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
}

static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	int err;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (host->hsq_enabled)
		return mmc_blk_hsq_issue_rw_rq(mq, req);

	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !mmc_card_blk_no_cmd23(card) || do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	if (mmc_card_ult_capacity(card)) {
		brq->cmd.ext_addr = blk_rq_pos(req) >> 32;
		brq->cmd.has_ext_addr = true;
	}
}

#define MMC_MAX_RETRIES		5
#define MMC_DATA_RETRIES	2
#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)

static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
	struct mmc_command cmd = {
		.opcode = MMC_STOP_TRANSMISSION,
		.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
		/* Some hosts wait for busy anyway, so provide a busy timeout */
		.busy_timeout = timeout,
	};

	return mmc_wait_for_cmd(card->host, &cmd, 5);
}

static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
	int err;

	mmc_retune_hold_now(card->host);

	mmc_blk_send_stop(card, timeout);

	err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);

	mmc_retune_release(card->host);

	return err;
}

#define MMC_READ_SINGLE_RETRIES	2

/* Single (native) sector read during recovery */
static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	blk_status_t error = BLK_STS_OK;
	size_t bytes_per_read = queue_physical_block_size(mq->queue);

	do {
		u32 status;
		int err;
		int retries = 0;

		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);

			mmc_wait_for_req(host, mrq);

			err = mmc_send_status(card, &status);
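			/*
			 * If the card status cannot even be read, bail out of
			 * single-sector recovery for the remaining request.
			 */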
			if (err)
				goto error_exit;

			if (!mmc_host_is_spi(host) &&
			    !mmc_ready_for_data(status)) {
				err = mmc_blk_fix_state(card, req);
				if (err)
					goto error_exit;
			}

			if (!mrq->cmd->error)
				break;
		}

		if (mrq->cmd->error ||
		    mrq->data->error ||
		    (!mmc_host_is_spi(host) &&
		     (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
			error = BLK_STS_IOERR;
		else
			error = BLK_STS_OK;

	} while (blk_update_request(req, error, bytes_per_read));

	return;

error_exit:
	mrq->data->bytes_xfered = 0;
	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
	/* Let it try the remaining request again */
	if (mqrq->retries > MMC_MAX_RETRIES - 1)
		mqrq->retries = MMC_MAX_RETRIES - 1;
}

static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
	return !!brq->mrq.sbc;
}

static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
	return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}

/*
 * Check for errors the host controller driver might not have seen such as
 * response mode errors or invalid card state.
 */
static bool mmc_blk_status_error(struct request *req, u32 status)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_queue *mq = req->q->queuedata;
	u32 stop_err_bits;

	if (mmc_host_is_spi(mq->card->host))
		return false;

	stop_err_bits = mmc_blk_stop_err_bits(brq);

	return brq->cmd.resp[0] & CMD_ERRORS ||
	       brq->stop.resp[0] & stop_err_bits ||
	       status & stop_err_bits ||
	       (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
}

static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{
	return !brq->sbc.error && !brq->cmd.error &&
	       !(brq->cmd.resp[0] & CMD_ERRORS);
}

/*
 * Requests are completed by mmc_blk_mq_complete_rq() which sets a simple
 * policy:
 * 1. A request that has transferred at least some data is considered
 *    successful and will be requeued if there is remaining data to
 *    transfer.
 * 2. Otherwise the number of retries is incremented and the request
 *    will be requeued if there are remaining retries.
 * 3. Otherwise the request will be errored out.
 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
 * mqrq->retries. So there are only 4 possible actions here:
 * 1. do not accept the bytes_xfered value i.e. set it to zero
 * 2. change mqrq->retries to determine the number of retries
 * 3. try to reset the card
 * 4. read one sector at a time
 */
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = mq->card;
	u32 status;
	u32 blocks;
	int err;

	/*
	 * Some errors the host driver might not have seen. Set the number of
	 * bytes transferred to zero in that case.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err || mmc_blk_status_error(req, status))
		brq->data.bytes_xfered = 0;

	mmc_retune_release(card->host);

	/*
	 * Try again to get the status. This also provides an opportunity for
	 * re-tuning.
	 */
	if (err)
		err = __mmc_send_status(card, &status, 0);

	/*
	 * Nothing more to do after the number of bytes transferred has been
	 * updated and there is no card.
	 */
	if (err && mmc_detect_card_removed(card->host))
		return;

	/* Try to get back to "tran" state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    (err || !mmc_ready_for_data(status)))
		err = mmc_blk_fix_state(mq->card, req);

	/*
	 * Special case for SD cards where the card might record the number of
	 * blocks written.
	 */
	if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
	    rq_data_dir(req) == WRITE) {
		if (mmc_sd_num_wr_blocks(card, &blocks))
			brq->data.bytes_xfered = 0;
		else
			brq->data.bytes_xfered = blocks << 9;
	}

	/* Reset if the card is in a bad state */
	if (!mmc_host_is_spi(mq->card->host) &&
	    err && mmc_blk_reset(md, card->host, type)) {
		pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
		mqrq->retries = MMC_NO_RETRIES;
		return;
	}

	/*
	 * If anything was done, just return and if there is anything remaining
	 * on the request it will get requeued.
	 */
	if (brq->data.bytes_xfered)
		return;

	/* Reset before last retry */
	if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
	    mmc_blk_reset(md, card->host, type))
		return;

	/* Command errors fail fast, so use all MMC_MAX_RETRIES */
	if (brq->sbc.error || brq->cmd.error)
		return;

	/* Reduce the remaining retries for data errors */
	if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
		mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
		return;
	}

	if (rq_data_dir(req) == READ && brq->data.blocks >
	    queue_physical_block_size(mq->queue) >> 9) {
		/* Read one (native) sector at a time */
		mmc_blk_read_single(mq, req);
		return;
	}
}

static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
	mmc_blk_eval_resp_error(brq);

	return brq->sbc.error || brq->cmd.error || brq->stop.error ||
	       brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}

static int mmc_spi_err_check(struct mmc_card *card)
{
	u32 status = 0;
	int err;

	/*
	 * SPI does not have a TRAN state we have to wait on; instead, the
	 * card is ready again when it no longer holds the line LOW.
	 * We still have to ensure two things here before we know the write
	 * was successful:
	 * 1. The card has not disconnected during busy and we actually read our
	 *    own pull-up, thinking it was still connected, so ensure it
	 *    still responds.
	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
	 *    just reconnected card after being disconnected during busy.
2006 */ 2007 err = __mmc_send_status(card, &status, 0); 2008 if (err) 2009 return err; 2010 /* All R1 and R2 bits of SPI are errors in our case */ 2011 if (status) 2012 return -EIO; 2013 return 0; 2014 } 2015 2016 static int mmc_blk_busy_cb(void *cb_data, bool *busy) 2017 { 2018 struct mmc_blk_busy_data *data = cb_data; 2019 u32 status = 0; 2020 int err; 2021 2022 err = mmc_send_status(data->card, &status); 2023 if (err) 2024 return err; 2025 2026 /* Accumulate response error bits. */ 2027 data->status |= status; 2028 2029 *busy = !mmc_ready_for_data(status); 2030 return 0; 2031 } 2032 2033 static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) 2034 { 2035 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2036 struct mmc_blk_busy_data cb_data; 2037 int err; 2038 2039 if (rq_data_dir(req) == READ) 2040 return 0; 2041 2042 if (mmc_host_is_spi(card->host)) { 2043 err = mmc_spi_err_check(card); 2044 if (err) 2045 mqrq->brq.data.bytes_xfered = 0; 2046 return err; 2047 } 2048 2049 cb_data.card = card; 2050 cb_data.status = 0; 2051 err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS, 2052 &mmc_blk_busy_cb, &cb_data); 2053 2054 /* 2055 * Do not assume data transferred correctly if there are any error bits 2056 * set. 2057 */ 2058 if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) { 2059 mqrq->brq.data.bytes_xfered = 0; 2060 err = err ? err : -EIO; 2061 } 2062 2063 /* Copy the exception bit so it will be seen later on */ 2064 if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) 2065 mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; 2066 2067 return err; 2068 } 2069 2070 static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, 2071 struct request *req) 2072 { 2073 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 2074 2075 mmc_blk_reset_success(mq->blkdata, type); 2076 } 2077 2078 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) 2079 { 2080 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2081 unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; 2082 2083 if (nr_bytes) { 2084 if (blk_update_request(req, BLK_STS_OK, nr_bytes)) 2085 blk_mq_requeue_request(req, true); 2086 else 2087 __blk_mq_end_request(req, BLK_STS_OK); 2088 } else if (!blk_rq_bytes(req)) { 2089 __blk_mq_end_request(req, BLK_STS_IOERR); 2090 } else if (mqrq->retries++ < MMC_MAX_RETRIES) { 2091 blk_mq_requeue_request(req, true); 2092 } else { 2093 if (mmc_card_removed(mq->card)) 2094 req->rq_flags |= RQF_QUIET; 2095 blk_mq_end_request(req, BLK_STS_IOERR); 2096 } 2097 } 2098 2099 static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, 2100 struct mmc_queue_req *mqrq) 2101 { 2102 return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && 2103 (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || 2104 mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); 2105 } 2106 2107 static void mmc_blk_urgent_bkops(struct mmc_queue *mq, 2108 struct mmc_queue_req *mqrq) 2109 { 2110 if (mmc_blk_urgent_bkops_needed(mq, mqrq)) 2111 mmc_run_bkops(mq->card); 2112 } 2113 2114 static void mmc_blk_hsq_req_done(struct mmc_request *mrq) 2115 { 2116 struct mmc_queue_req *mqrq = 2117 container_of(mrq, struct mmc_queue_req, brq.mrq); 2118 struct request *req = mmc_queue_req_to_req(mqrq); 2119 struct request_queue *q = req->q; 2120 struct mmc_queue *mq = q->queuedata; 2121 struct mmc_host *host = mq->card->host; 2122 unsigned long flags; 2123 2124 if (mmc_blk_rq_error(&mqrq->brq) || 2125 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2126 spin_lock_irqsave(&mq->lock, 
flags); 2127 mq->recovery_needed = true; 2128 mq->recovery_req = req; 2129 spin_unlock_irqrestore(&mq->lock, flags); 2130 2131 host->cqe_ops->cqe_recovery_start(host); 2132 2133 schedule_work(&mq->recovery_work); 2134 return; 2135 } 2136 2137 mmc_blk_rw_reset_success(mq, req); 2138 2139 /* 2140 * Block layer timeouts race with completions which means the normal 2141 * completion path cannot be used during recovery. 2142 */ 2143 if (mq->in_recovery) 2144 mmc_blk_cqe_complete_rq(mq, req); 2145 else if (likely(!blk_should_fake_timeout(req->q))) 2146 blk_mq_complete_request(req); 2147 } 2148 2149 void mmc_blk_mq_complete(struct request *req) 2150 { 2151 struct mmc_queue *mq = req->q->queuedata; 2152 struct mmc_host *host = mq->card->host; 2153 2154 if (host->cqe_enabled) 2155 mmc_blk_cqe_complete_rq(mq, req); 2156 else if (likely(!blk_should_fake_timeout(req->q))) 2157 mmc_blk_mq_complete_rq(mq, req); 2158 } 2159 2160 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, 2161 struct request *req) 2162 { 2163 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2164 struct mmc_host *host = mq->card->host; 2165 2166 if (mmc_blk_rq_error(&mqrq->brq) || 2167 mmc_blk_card_busy(mq->card, req)) { 2168 mmc_blk_mq_rw_recovery(mq, req); 2169 } else { 2170 mmc_blk_rw_reset_success(mq, req); 2171 mmc_retune_release(host); 2172 } 2173 2174 mmc_blk_urgent_bkops(mq, mqrq); 2175 } 2176 2177 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) 2178 { 2179 unsigned long flags; 2180 bool put_card; 2181 2182 spin_lock_irqsave(&mq->lock, flags); 2183 2184 mq->in_flight[issue_type] -= 1; 2185 2186 put_card = (mmc_tot_in_flight(mq) == 0); 2187 2188 spin_unlock_irqrestore(&mq->lock, flags); 2189 2190 if (put_card) 2191 mmc_put_card(mq->card, &mq->ctx); 2192 } 2193 2194 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, 2195 bool can_sleep) 2196 { 2197 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); 2198 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2199 struct mmc_request *mrq = &mqrq->brq.mrq; 2200 struct mmc_host *host = mq->card->host; 2201 2202 mmc_post_req(host, mrq, 0); 2203 2204 /* 2205 * Block layer timeouts race with completions which means the normal 2206 * completion path cannot be used during recovery. 
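 * During recovery the request is therefore completed directly; otherwise
 * it is handed to blk-mq, using the _direct variant when this context is
 * allowed to sleep.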
2207 */ 2208 if (mq->in_recovery) { 2209 mmc_blk_mq_complete_rq(mq, req); 2210 } else if (likely(!blk_should_fake_timeout(req->q))) { 2211 if (can_sleep) 2212 blk_mq_complete_request_direct(req, mmc_blk_mq_complete); 2213 else 2214 blk_mq_complete_request(req); 2215 } 2216 2217 mmc_blk_mq_dec_in_flight(mq, issue_type); 2218 } 2219 2220 void mmc_blk_mq_recovery(struct mmc_queue *mq) 2221 { 2222 struct request *req = mq->recovery_req; 2223 struct mmc_host *host = mq->card->host; 2224 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2225 2226 mq->recovery_req = NULL; 2227 mq->rw_wait = false; 2228 2229 if (mmc_blk_rq_error(&mqrq->brq)) { 2230 mmc_retune_hold_now(host); 2231 mmc_blk_mq_rw_recovery(mq, req); 2232 } 2233 2234 mmc_blk_urgent_bkops(mq, mqrq); 2235 2236 mmc_blk_mq_post_req(mq, req, true); 2237 } 2238 2239 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, 2240 struct request **prev_req) 2241 { 2242 if (mmc_host_can_done_complete(mq->card->host)) 2243 return; 2244 2245 mutex_lock(&mq->complete_lock); 2246 2247 if (!mq->complete_req) 2248 goto out_unlock; 2249 2250 mmc_blk_mq_poll_completion(mq, mq->complete_req); 2251 2252 if (prev_req) 2253 *prev_req = mq->complete_req; 2254 else 2255 mmc_blk_mq_post_req(mq, mq->complete_req, true); 2256 2257 mq->complete_req = NULL; 2258 2259 out_unlock: 2260 mutex_unlock(&mq->complete_lock); 2261 } 2262 2263 void mmc_blk_mq_complete_work(struct work_struct *work) 2264 { 2265 struct mmc_queue *mq = container_of(work, struct mmc_queue, 2266 complete_work); 2267 2268 mmc_blk_mq_complete_prev_req(mq, NULL); 2269 } 2270 2271 static void mmc_blk_mq_req_done(struct mmc_request *mrq) 2272 { 2273 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, 2274 brq.mrq); 2275 struct request *req = mmc_queue_req_to_req(mqrq); 2276 struct request_queue *q = req->q; 2277 struct mmc_queue *mq = q->queuedata; 2278 struct mmc_host *host = mq->card->host; 2279 unsigned long flags; 2280 2281 if (!mmc_host_can_done_complete(host)) { 2282 bool waiting; 2283 2284 /* 2285 * We cannot complete the request in this context, so record 2286 * that there is a request to complete, and that a following 2287 * request does not need to wait (although it does need to 2288 * complete complete_req first). 2289 */ 2290 spin_lock_irqsave(&mq->lock, flags); 2291 mq->complete_req = req; 2292 mq->rw_wait = false; 2293 waiting = mq->waiting; 2294 spin_unlock_irqrestore(&mq->lock, flags); 2295 2296 /* 2297 * If 'waiting' then the waiting task will complete this 2298 * request, otherwise queue a work to do it. Note that 2299 * complete_work may still race with the dispatch of a following 2300 * request. 
2301 */ 2302 if (waiting) 2303 wake_up(&mq->wait); 2304 else 2305 queue_work(mq->card->complete_wq, &mq->complete_work); 2306 2307 return; 2308 } 2309 2310 /* Take the recovery path for errors or urgent background operations */ 2311 if (mmc_blk_rq_error(&mqrq->brq) || 2312 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2313 spin_lock_irqsave(&mq->lock, flags); 2314 mq->recovery_needed = true; 2315 mq->recovery_req = req; 2316 spin_unlock_irqrestore(&mq->lock, flags); 2317 wake_up(&mq->wait); 2318 schedule_work(&mq->recovery_work); 2319 return; 2320 } 2321 2322 mmc_blk_rw_reset_success(mq, req); 2323 2324 mq->rw_wait = false; 2325 wake_up(&mq->wait); 2326 2327 /* context unknown */ 2328 mmc_blk_mq_post_req(mq, req, false); 2329 } 2330 2331 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) 2332 { 2333 unsigned long flags; 2334 bool done; 2335 2336 /* 2337 * Wait while there is another request in progress, but not if recovery 2338 * is needed. Also indicate whether there is a request waiting to start. 2339 */ 2340 spin_lock_irqsave(&mq->lock, flags); 2341 if (mq->recovery_needed) { 2342 *err = -EBUSY; 2343 done = true; 2344 } else { 2345 done = !mq->rw_wait; 2346 } 2347 mq->waiting = !done; 2348 spin_unlock_irqrestore(&mq->lock, flags); 2349 2350 return done; 2351 } 2352 2353 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) 2354 { 2355 int err = 0; 2356 2357 wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); 2358 2359 /* Always complete the previous request if there is one */ 2360 mmc_blk_mq_complete_prev_req(mq, prev_req); 2361 2362 return err; 2363 } 2364 2365 static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, 2366 struct request *req) 2367 { 2368 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2369 struct mmc_host *host = mq->card->host; 2370 struct request *prev_req = NULL; 2371 int err = 0; 2372 2373 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 2374 2375 mqrq->brq.mrq.done = mmc_blk_mq_req_done; 2376 2377 mmc_pre_req(host, &mqrq->brq.mrq); 2378 2379 err = mmc_blk_rw_wait(mq, &prev_req); 2380 if (err) 2381 goto out_post_req; 2382 2383 mq->rw_wait = true; 2384 2385 err = mmc_start_request(host, &mqrq->brq.mrq); 2386 2387 if (prev_req) 2388 mmc_blk_mq_post_req(mq, prev_req, true); 2389 2390 if (err) 2391 mq->rw_wait = false; 2392 2393 /* Release re-tuning here where there is no synchronization required */ 2394 if (err || mmc_host_can_done_complete(host)) 2395 mmc_retune_release(host); 2396 2397 out_post_req: 2398 if (err) 2399 mmc_post_req(host, &mqrq->brq.mrq, err); 2400 2401 return err; 2402 } 2403 2404 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) 2405 { 2406 if (host->cqe_enabled) 2407 return host->cqe_ops->cqe_wait_for_idle(host); 2408 2409 return mmc_blk_rw_wait(mq, NULL); 2410 } 2411 2412 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) 2413 { 2414 struct mmc_blk_data *md = mq->blkdata; 2415 struct mmc_card *card = md->queue.card; 2416 struct mmc_host *host = card->host; 2417 int ret; 2418 2419 ret = mmc_blk_part_switch(card, md->part_type); 2420 if (ret) 2421 return MMC_REQ_FAILED_TO_START; 2422 2423 switch (mmc_issue_type(mq, req)) { 2424 case MMC_ISSUE_SYNC: 2425 ret = mmc_blk_wait_for_idle(mq, host); 2426 if (ret) 2427 return MMC_REQ_BUSY; 2428 switch (req_op(req)) { 2429 case REQ_OP_DRV_IN: 2430 case REQ_OP_DRV_OUT: 2431 mmc_blk_issue_drv_op(mq, req); 2432 break; 2433 case REQ_OP_DISCARD: 2434 mmc_blk_issue_discard_rq(mq, req); 2435 break; 2436 case 
REQ_OP_SECURE_ERASE:
2437 mmc_blk_issue_secdiscard_rq(mq, req);
2438 break;
2439 case REQ_OP_WRITE_ZEROES:
2440 mmc_blk_issue_trim_rq(mq, req);
2441 break;
2442 case REQ_OP_FLUSH:
2443 mmc_blk_issue_flush(mq, req);
2444 break;
2445 default:
2446 WARN_ON_ONCE(1);
2447 return MMC_REQ_FAILED_TO_START;
2448 }
2449 return MMC_REQ_FINISHED;
2450 case MMC_ISSUE_DCMD:
2451 case MMC_ISSUE_ASYNC:
2452 switch (req_op(req)) {
2453 case REQ_OP_FLUSH:
2454 if (!mmc_cache_enabled(host)) {
2455 blk_mq_end_request(req, BLK_STS_OK);
2456 return MMC_REQ_FINISHED;
2457 }
2458 ret = mmc_blk_cqe_issue_flush(mq, req);
2459 break;
2460 case REQ_OP_WRITE:
2461 card->written_flag = true;
2462 fallthrough;
2463 case REQ_OP_READ:
2464 if (host->cqe_enabled)
2465 ret = mmc_blk_cqe_issue_rw_rq(mq, req);
2466 else
2467 ret = mmc_blk_mq_issue_rw_rq(mq, req);
2468 break;
2469 default:
2470 WARN_ON_ONCE(1);
2471 ret = -EINVAL;
2472 }
2473 if (!ret)
2474 return MMC_REQ_STARTED;
2475 return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
2476 default:
2477 WARN_ON_ONCE(1);
2478 return MMC_REQ_FAILED_TO_START;
2479 }
2480 }
2481
2482 static inline int mmc_blk_readonly(struct mmc_card *card)
2483 {
2484 return mmc_card_readonly(card) ||
2485 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2486 }
2487
2488 /*
2489 * Search for a declared partitions node for the disk in the mmc-card related node.
2490 *
2491 * This is to permit support for a partition table defined in DT, for the special case
2492 * where a partition table is not written to the disk and is expected to be
2493 * passed from the running system.
2494 *
2495 * For the user disk, the "partitions" node is searched.
2496 * For the special HW disks, a "partitions-" node with the appended name is used,
2497 * following this conversion table (to adhere to JEDEC naming):
2498 * - boot0 -> partitions-boot1
2499 * - boot1 -> partitions-boot2
2500 * - gp0 -> partitions-gp1
2501 * - gp1 -> partitions-gp2
2502 * - gp2 -> partitions-gp3
2503 * - gp3 -> partitions-gp4
2504 */
2505 static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
2506 const char *subname)
2507 {
2508 const char *node_name = "partitions";
2509
2510 if (subname) {
2511 mmc_dev = mmc_dev->parent;
2512
2513 /*
2514 * Check if we are allocating a boot0/1 BOOT disk.
2515 * In DT we use the JEDEC naming boot1/2.
2516 */
2517 if (!strcmp(subname, "boot0"))
2518 node_name = "partitions-boot1";
2519 if (!strcmp(subname, "boot1"))
2520 node_name = "partitions-boot2";
2521 /*
2522 * Check if we are allocating a gp0/1/2/3 GP disk.
2523 * In DT we use the JEDEC naming gp1/2/3/4.
2524 */
2525 if (!strcmp(subname, "gp0"))
2526 node_name = "partitions-gp1";
2527 if (!strcmp(subname, "gp1"))
2528 node_name = "partitions-gp2";
2529 if (!strcmp(subname, "gp2"))
2530 node_name = "partitions-gp3";
2531 if (!strcmp(subname, "gp3"))
2532 node_name = "partitions-gp4";
2533 }
2534
2535 return device_get_named_child_node(mmc_dev, node_name);
2536 }
2537
2538 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2539 struct device *parent,
2540 sector_t size,
2541 bool default_ro,
2542 const char *subname,
2543 int area_type,
2544 unsigned int part_type)
2545 {
2546 struct fwnode_handle *disk_fwnode;
2547 struct mmc_blk_data *md;
2548 int devidx, ret;
2549 char cap_str[10];
2550 unsigned int features = 0;
2551
2552 devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
2553 if (devidx < 0) {
2554 /*
2555 * We get -ENOSPC because there are no more available
2556 * devidx. The reason may be that either userspace hasn't yet
2557 * unmounted the partitions, which postpones mmc_blk_release()
2558 * from being called, or the device has more partitions than
2559 * we support.
2560 */
2561 if (devidx == -ENOSPC)
2562 dev_err(mmc_dev(card->host),
2563 "no more device IDs available\n");
2564
2565 return ERR_PTR(devidx);
2566 }
2567
2568 md = kzalloc(sizeof(*md), GFP_KERNEL);
2569 if (!md) {
2570 ret = -ENOMEM;
2571 goto out;
2572 }
2573
2574 md->area_type = area_type;
2575
2576 /*
2577 * Set the read-only status based on the supported commands
2578 * and the write protect switch.
2579 */
2580 md->read_only = mmc_blk_readonly(card);
2581
2582 if (mmc_host_can_cmd23(card->host) && mmc_card_can_cmd23(card))
2583 md->flags |= MMC_BLK_CMD23;
2584
2585 if (md->flags & MMC_BLK_CMD23 &&
2586 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2587 card->ext_csd.rel_sectors)) {
2588 md->flags |= MMC_BLK_REL_WR;
2589 features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
2590 } else if (mmc_cache_enabled(card->host)) {
2591 features |= BLK_FEAT_WRITE_CACHE;
2592 }
2593
2594 md->disk = mmc_init_queue(&md->queue, card, features);
2595 if (IS_ERR(md->disk)) {
2596 ret = PTR_ERR(md->disk);
2597 goto err_kfree;
2598 }
2599
2600 INIT_LIST_HEAD(&md->part);
2601 INIT_LIST_HEAD(&md->rpmbs);
2602 kref_init(&md->kref);
2603
2604 md->queue.blkdata = md;
2605 md->part_type = part_type;
2606
2607 md->disk->major = MMC_BLOCK_MAJOR;
2608 md->disk->minors = perdev_minors;
2609 md->disk->first_minor = devidx * perdev_minors;
2610 md->disk->fops = &mmc_bdops;
2611 md->disk->private_data = md;
2612 md->parent = parent;
2613 set_disk_ro(md->disk, md->read_only || default_ro);
2614 if (area_type & MMC_BLK_DATA_AREA_RPMB)
2615 md->disk->flags |= GENHD_FL_NO_PART;
2616
2617 /*
2618 * As discussed on lkml, GENHD_FL_REMOVABLE should:
2619 *
2620 * - be set for removable media with permanent block devices
2621 * - be unset for removable block devices with permanent media
2622 *
2623 * Since MMC block devices clearly fall under the second
2624 * case, we do not set GENHD_FL_REMOVABLE. Userspace
2625 * should use the block device creation/destruction hotplug
2626 * messages to tell when the card is present.
2627 */
2628
2629 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2630 "mmcblk%u%s", card->host->index, subname ? subname : "");
2631
2632 set_capacity(md->disk, size);
2633
2634 string_get_size((u64)size, 512, STRING_UNITS_2,
2635 cap_str, sizeof(cap_str));
2636 pr_info("%s: %s %s %s%s\n",
2637 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2638 cap_str, md->read_only ? " (ro)" : "");
2639
2640 /* used in ->open, must be set before add_disk: */
2641 if (area_type == MMC_BLK_DATA_AREA_MAIN)
2642 dev_set_drvdata(&card->dev, md);
2643 disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
2644 ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
2645 disk_fwnode);
2646 if (ret)
2647 goto err_put_disk;
2648 return md;
2649
2650 err_put_disk:
2651 put_disk(md->disk);
2652 blk_mq_free_tag_set(&md->queue.tag_set);
2653 err_kfree:
2654 kfree(md);
2655 out:
2656 ida_free(&mmc_blk_ida, devidx);
2657 return ERR_PTR(ret);
2658 }
2659
2660 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2661 {
2662 sector_t size;
2663
2664 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2665 /*
2666 * The EXT_CSD sector count is in number of 512 byte
2667 * sectors.
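 * set_capacity() also takes units of 512 byte sectors, so the value can
 * be used as the disk capacity without conversion.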
2668 */ 2669 size = card->ext_csd.sectors; 2670 } else { 2671 /* 2672 * The CSD capacity field is in units of read_blkbits. 2673 * set_capacity takes units of 512 bytes. 2674 */ 2675 size = (typeof(sector_t))card->csd.capacity 2676 << (card->csd.read_blkbits - 9); 2677 } 2678 2679 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 2680 MMC_BLK_DATA_AREA_MAIN, 0); 2681 } 2682 2683 static int mmc_blk_alloc_part(struct mmc_card *card, 2684 struct mmc_blk_data *md, 2685 unsigned int part_type, 2686 sector_t size, 2687 bool default_ro, 2688 const char *subname, 2689 int area_type) 2690 { 2691 struct mmc_blk_data *part_md; 2692 2693 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 2694 subname, area_type, part_type); 2695 if (IS_ERR(part_md)) 2696 return PTR_ERR(part_md); 2697 list_add(&part_md->part, &md->part); 2698 2699 return 0; 2700 } 2701 2702 /** 2703 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev 2704 * @filp: the character device file 2705 * @cmd: the ioctl() command 2706 * @arg: the argument from userspace 2707 * 2708 * This will essentially just redirect the ioctl()s coming in over to 2709 * the main block device spawning the RPMB character device. 2710 */ 2711 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, 2712 unsigned long arg) 2713 { 2714 struct mmc_rpmb_data *rpmb = filp->private_data; 2715 int ret; 2716 2717 switch (cmd) { 2718 case MMC_IOC_CMD: 2719 ret = mmc_blk_ioctl_cmd(rpmb->md, 2720 (struct mmc_ioc_cmd __user *)arg, 2721 rpmb); 2722 break; 2723 case MMC_IOC_MULTI_CMD: 2724 ret = mmc_blk_ioctl_multi_cmd(rpmb->md, 2725 (struct mmc_ioc_multi_cmd __user *)arg, 2726 rpmb); 2727 break; 2728 default: 2729 ret = -EINVAL; 2730 break; 2731 } 2732 2733 return ret; 2734 } 2735 2736 #ifdef CONFIG_COMPAT 2737 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, 2738 unsigned long arg) 2739 { 2740 return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 2741 } 2742 #endif 2743 2744 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) 2745 { 2746 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2747 struct mmc_rpmb_data, chrdev); 2748 2749 get_device(&rpmb->dev); 2750 filp->private_data = rpmb; 2751 2752 return nonseekable_open(inode, filp); 2753 } 2754 2755 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) 2756 { 2757 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2758 struct mmc_rpmb_data, chrdev); 2759 2760 put_device(&rpmb->dev); 2761 2762 return 0; 2763 } 2764 2765 static const struct file_operations mmc_rpmb_fileops = { 2766 .release = mmc_rpmb_chrdev_release, 2767 .open = mmc_rpmb_chrdev_open, 2768 .owner = THIS_MODULE, 2769 .unlocked_ioctl = mmc_rpmb_ioctl, 2770 #ifdef CONFIG_COMPAT 2771 .compat_ioctl = mmc_rpmb_ioctl_compat, 2772 #endif 2773 }; 2774 2775 static void mmc_blk_rpmb_device_release(struct device *dev) 2776 { 2777 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2778 2779 rpmb_dev_unregister(rpmb->rdev); 2780 mmc_blk_put(rpmb->md); 2781 ida_free(&mmc_rpmb_ida, rpmb->id); 2782 kfree(rpmb); 2783 } 2784 2785 static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count) 2786 { 2787 unsigned int n; 2788 2789 for (n = 0; n < cmd_count; n++) 2790 kfree(idata[n]); 2791 kfree(idata); 2792 } 2793 2794 static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb, 2795 unsigned int cmd_count) 2796 { 2797 struct mmc_blk_ioc_data **idata; 2798 unsigned int n; 2799 2800 idata = kcalloc(cmd_count, 
sizeof(*idata), GFP_KERNEL); 2801 if (!idata) 2802 return NULL; 2803 2804 for (n = 0; n < cmd_count; n++) { 2805 idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL); 2806 if (!idata[n]) { 2807 free_idata(idata, n); 2808 return NULL; 2809 } 2810 idata[n]->rpmb = rpmb; 2811 } 2812 2813 return idata; 2814 } 2815 2816 static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode, 2817 int write_flag, u8 *buf, unsigned int buf_bytes) 2818 { 2819 /* 2820 * The size of an RPMB frame must match what's expected by the 2821 * hardware. 2822 */ 2823 static_assert(!CHECK_SIZE_NEQ(512), "RPMB frame size must be 512 bytes"); 2824 2825 idata->ic.opcode = opcode; 2826 idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2827 idata->ic.write_flag = write_flag; 2828 idata->ic.blksz = RPMB_FRAME_SIZE; 2829 idata->ic.blocks = buf_bytes / idata->ic.blksz; 2830 idata->buf = buf; 2831 idata->buf_bytes = buf_bytes; 2832 } 2833 2834 static int mmc_route_rpmb_frames(struct device *dev, u8 *req, 2835 unsigned int req_len, u8 *resp, 2836 unsigned int resp_len) 2837 { 2838 struct rpmb_frame *frm = (struct rpmb_frame *)req; 2839 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2840 struct mmc_blk_data *md = rpmb->md; 2841 struct mmc_blk_ioc_data **idata; 2842 struct mmc_queue_req *mq_rq; 2843 unsigned int cmd_count; 2844 struct request *rq; 2845 u16 req_type; 2846 bool write; 2847 int ret; 2848 2849 if (IS_ERR(md->queue.card)) 2850 return PTR_ERR(md->queue.card); 2851 2852 if (req_len < RPMB_FRAME_SIZE) 2853 return -EINVAL; 2854 2855 req_type = be16_to_cpu(frm->req_resp); 2856 switch (req_type) { 2857 case RPMB_PROGRAM_KEY: 2858 if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len)) 2859 return -EINVAL; 2860 write = true; 2861 break; 2862 case RPMB_GET_WRITE_COUNTER: 2863 if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len)) 2864 return -EINVAL; 2865 write = false; 2866 break; 2867 case RPMB_WRITE_DATA: 2868 if (!CHECK_SIZE_ALIGNED(req_len) || CHECK_SIZE_NEQ(resp_len)) 2869 return -EINVAL; 2870 write = true; 2871 break; 2872 case RPMB_READ_DATA: 2873 if (CHECK_SIZE_NEQ(req_len) || !CHECK_SIZE_ALIGNED(resp_len)) 2874 return -EINVAL; 2875 write = false; 2876 break; 2877 default: 2878 return -EINVAL; 2879 } 2880 2881 /* Write operations require 3 commands, read operations require 2 */ 2882 cmd_count = write ? 
3 : 2; 2883 2884 idata = alloc_idata(rpmb, cmd_count); 2885 if (!idata) 2886 return -ENOMEM; 2887 2888 if (write) { 2889 struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp; 2890 2891 /* Send write request frame(s) */ 2892 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 2893 1 | MMC_CMD23_ARG_REL_WR, req, req_len); 2894 2895 /* Send result request frame */ 2896 memset(resp_frm, 0, RPMB_FRAME_SIZE); 2897 resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); 2898 set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp, 2899 resp_len); 2900 2901 /* Read response frame */ 2902 set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); 2903 } else { 2904 /* Send write request frame(s) */ 2905 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len); 2906 2907 /* Read response frame */ 2908 set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); 2909 } 2910 2911 rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0); 2912 if (IS_ERR(rq)) { 2913 ret = PTR_ERR(rq); 2914 goto out; 2915 } 2916 2917 mq_rq = req_to_mmc_queue_req(rq); 2918 mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB; 2919 mq_rq->drv_op_result = -EIO; 2920 mq_rq->drv_op_data = idata; 2921 mq_rq->ioc_count = cmd_count; 2922 blk_execute_rq(rq, false); 2923 ret = req_to_mmc_queue_req(rq)->drv_op_result; 2924 2925 blk_mq_free_request(rq); 2926 2927 out: 2928 free_idata(idata, cmd_count); 2929 return ret; 2930 } 2931 2932 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, 2933 struct mmc_blk_data *md, 2934 unsigned int part_index, 2935 sector_t size, 2936 const char *subname) 2937 { 2938 int devidx, ret; 2939 char rpmb_name[DISK_NAME_LEN]; 2940 char cap_str[10]; 2941 struct mmc_rpmb_data *rpmb; 2942 2943 /* This creates the minor number for the RPMB char device */ 2944 devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL); 2945 if (devidx < 0) 2946 return devidx; 2947 2948 rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); 2949 if (!rpmb) { 2950 ida_free(&mmc_rpmb_ida, devidx); 2951 return -ENOMEM; 2952 } 2953 2954 snprintf(rpmb_name, sizeof(rpmb_name), 2955 "mmcblk%u%s", card->host->index, subname ? subname : ""); 2956 2957 rpmb->id = devidx; 2958 rpmb->part_index = part_index; 2959 rpmb->dev.init_name = rpmb_name; 2960 rpmb->dev.bus = &mmc_rpmb_bus_type; 2961 rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); 2962 rpmb->dev.parent = &card->dev; 2963 rpmb->dev.release = mmc_blk_rpmb_device_release; 2964 device_initialize(&rpmb->dev); 2965 dev_set_drvdata(&rpmb->dev, rpmb); 2966 mmc_blk_get(md->disk); 2967 rpmb->md = md; 2968 2969 cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); 2970 rpmb->chrdev.owner = THIS_MODULE; 2971 ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); 2972 if (ret) { 2973 pr_err("%s: could not add character device\n", rpmb_name); 2974 goto out_put_device; 2975 } 2976 2977 list_add(&rpmb->node, &md->rpmbs); 2978 2979 string_get_size((u64)size, 512, STRING_UNITS_2, 2980 cap_str, sizeof(cap_str)); 2981 2982 pr_info("%s: %s %s %s, chardev (%d:%d)\n", 2983 rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str, 2984 MAJOR(mmc_rpmb_devt), rpmb->id); 2985 2986 return 0; 2987 2988 out_put_device: 2989 put_device(&rpmb->dev); 2990 return ret; 2991 } 2992 2993 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) 2994 2995 { 2996 cdev_device_del(&rpmb->chrdev, &rpmb->dev); 2997 put_device(&rpmb->dev); 2998 } 2999 3000 /* MMC Physical partitions consist of two boot partitions and 3001 * up to four general purpose partitions. 
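 * The RPMB area is handled separately below and is exposed as a character
 * device rather than a block device.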
3002 * For each partition enabled in EXT_CSD a block device will be allocated
3003 * to provide access to the partition.
3004 */
3005
3006 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
3007 {
3008 int idx, ret;
3009
3010 if (!mmc_card_mmc(card))
3011 return 0;
3012
3013 for (idx = 0; idx < card->nr_parts; idx++) {
3014 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
3015 /*
3016 * RPMB partitions do not provide block access, they
3017 * are only accessed using ioctl()s. Thus create
3018 * special RPMB block devices that do not have a
3019 * backing block queue for these.
3020 */
3021 ret = mmc_blk_alloc_rpmb_part(card, md,
3022 card->part[idx].part_cfg,
3023 card->part[idx].size >> 9,
3024 card->part[idx].name);
3025 if (ret)
3026 return ret;
3027 } else if (card->part[idx].size) {
3028 ret = mmc_blk_alloc_part(card, md,
3029 card->part[idx].part_cfg,
3030 card->part[idx].size >> 9,
3031 card->part[idx].force_ro,
3032 card->part[idx].name,
3033 card->part[idx].area_type);
3034 if (ret)
3035 return ret;
3036 }
3037 }
3038
3039 return 0;
3040 }
3041
3042 static void mmc_blk_remove_req(struct mmc_blk_data *md)
3043 {
3044 /*
3045 * Flush remaining requests and free queues. It is freeing the queue
3046 * that stops new requests from being accepted.
3047 */
3048 del_gendisk(md->disk);
3049 mmc_cleanup_queue(&md->queue);
3050 mmc_blk_put(md);
3051 }
3052
3053 static void mmc_blk_remove_parts(struct mmc_card *card,
3054 struct mmc_blk_data *md)
3055 {
3056 struct list_head *pos, *q;
3057 struct mmc_blk_data *part_md;
3058 struct mmc_rpmb_data *rpmb;
3059
3060 /* Remove RPMB partitions */
3061 list_for_each_safe(pos, q, &md->rpmbs) {
3062 rpmb = list_entry(pos, struct mmc_rpmb_data, node);
3063 list_del(pos);
3064 mmc_blk_remove_rpmb_part(rpmb);
3065 }
3066 /* Remove block partitions */
3067 list_for_each_safe(pos, q, &md->part) {
3068 part_md = list_entry(pos, struct mmc_blk_data, part);
3069 list_del(pos);
3070 mmc_blk_remove_req(part_md);
3071 }
3072 }
3073
3074 #ifdef CONFIG_DEBUG_FS
3075
3076 static int mmc_dbg_card_status_get(void *data, u64 *val)
3077 {
3078 struct mmc_card *card = data;
3079 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3080 struct mmc_queue *mq = &md->queue;
3081 struct request *req;
3082 int ret;
3083
3084 /* Ask the block layer about the card status */
3085 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
3086 if (IS_ERR(req))
3087 return PTR_ERR(req);
3088 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
3089 req_to_mmc_queue_req(req)->drv_op_result = -EIO;
3090 blk_execute_rq(req, false);
3091 ret = req_to_mmc_queue_req(req)->drv_op_result;
3092 if (ret >= 0) {
3093 *val = ret;
3094 ret = 0;
3095 }
3096 blk_mq_free_request(req);
3097
3098 return ret;
3099 }
3100 DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
3101 NULL, "%08llx\n");
3102
3103 /* That is two digits * 512 + 1 for newline */
3104 #define EXT_CSD_STR_LEN 1025
3105
3106 static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
3107 {
3108 struct mmc_card *card = inode->i_private;
3109 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3110 struct mmc_queue *mq = &md->queue;
3111 struct request *req;
3112 char *buf;
3113 ssize_t n = 0;
3114 u8 *ext_csd;
3115 int err, i;
3116
3117 buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
3118 if (!buf)
3119 return -ENOMEM;
3120
3121 /* Ask the block layer for the EXT CSD */
3122 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
3123 if (IS_ERR(req)) {
3124 err = PTR_ERR(req);
3125 goto out_free;
3126 }
3127 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
3128 req_to_mmc_queue_req(req)->drv_op_result = -EIO;
3129 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
3130 blk_execute_rq(req, false);
3131 err = req_to_mmc_queue_req(req)->drv_op_result;
3132 blk_mq_free_request(req);
3133 if (err) {
3134 pr_err("FAILED %d\n", err);
3135 goto out_free;
3136 }
3137
3138 for (i = 0; i < 512; i++)
3139 n += sprintf(buf + n, "%02x", ext_csd[i]);
3140 n += sprintf(buf + n, "\n");
3141
3142 if (n != EXT_CSD_STR_LEN) {
3143 err = -EINVAL;
3144 kfree(ext_csd);
3145 goto out_free;
3146 }
3147
3148 filp->private_data = buf;
3149 kfree(ext_csd);
3150 return 0;
3151
3152 out_free:
3153 kfree(buf);
3154 return err;
3155 }
3156
3157 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
3158 size_t cnt, loff_t *ppos)
3159 {
3160 char *buf = filp->private_data;
3161
3162 return simple_read_from_buffer(ubuf, cnt, ppos,
3163 buf, EXT_CSD_STR_LEN);
3164 }
3165
3166 static int mmc_ext_csd_release(struct inode *inode, struct file *file)
3167 {
3168 kfree(file->private_data);
3169 return 0;
3170 }
3171
3172 static const struct file_operations mmc_dbg_ext_csd_fops = {
3173 .open = mmc_ext_csd_open,
3174 .read = mmc_ext_csd_read,
3175 .release = mmc_ext_csd_release,
3176 .llseek = default_llseek,
3177 };
3178
3179 static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
3180 {
3181 struct dentry *root;
3182
3183 if (!card->debugfs_root)
3184 return;
3185
3186 root = card->debugfs_root;
3187
3188 if (mmc_card_mmc(card) || mmc_card_sd(card)) {
3189 md->status_dentry =
3190 debugfs_create_file_unsafe("status", 0400, root,
3191 card,
3192 &mmc_dbg_card_status_fops);
3193 }
3194
3195 if (mmc_card_mmc(card)) {
3196 md->ext_csd_dentry =
3197 debugfs_create_file("ext_csd", S_IRUSR, root, card,
3198 &mmc_dbg_ext_csd_fops);
3199 }
3200 }
3201
3202 static void mmc_blk_remove_debugfs(struct mmc_card *card,
3203 struct mmc_blk_data *md)
3204 {
3205 if (!card->debugfs_root)
3206 return;
3207
3208 debugfs_remove(md->status_dentry);
3209 md->status_dentry = NULL;
3210
3211 debugfs_remove(md->ext_csd_dentry);
3212 md->ext_csd_dentry = NULL;
3213 }
3214
3215 #else
3216
3217 static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
3218 {
3219 }
3220
3221 static void mmc_blk_remove_debugfs(struct mmc_card *card,
3222 struct mmc_blk_data *md)
3223 {
3224 }
3225
3226 #endif /* CONFIG_DEBUG_FS */
3227
3228 static void mmc_blk_rpmb_add(struct mmc_card *card)
3229 {
3230 struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3231 struct mmc_rpmb_data *rpmb;
3232 struct rpmb_dev *rdev;
3233 unsigned int n;
3234 u32 cid[4];
3235 struct rpmb_descr descr = {
3236 .type = RPMB_TYPE_EMMC,
3237 .route_frames = mmc_route_rpmb_frames,
3238 .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ?
3239 2 : 32,
3240 .capacity = card->ext_csd.raw_rpmb_size_mult,
3241 .dev_id = (void *)cid,
3242 .dev_id_len = sizeof(cid),
3243 };
3244
3245 /*
3246 * Provide CID as an octet array. The CID needs to be interpreted
3247 * when used as input to derive the RPMB key since some fields
3248 * will change due to firmware updates.
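 * The loop below converts each raw CID word from big-endian to CPU byte
 * order before the array is handed to the RPMB layer via descr.dev_id.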
3249 */ 3250 for (n = 0; n < 4; n++) 3251 cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]); 3252 3253 list_for_each_entry(rpmb, &md->rpmbs, node) { 3254 rdev = rpmb_dev_register(&rpmb->dev, &descr); 3255 if (IS_ERR(rdev)) { 3256 pr_warn("%s: could not register RPMB device\n", 3257 dev_name(&rpmb->dev)); 3258 continue; 3259 } 3260 rpmb->rdev = rdev; 3261 } 3262 } 3263 3264 static int mmc_blk_probe(struct mmc_card *card) 3265 { 3266 struct mmc_blk_data *md; 3267 int ret = 0; 3268 3269 /* 3270 * Check that the card supports the command class(es) we need. 3271 */ 3272 if (!(card->csd.cmdclass & CCC_BLOCK_READ)) 3273 return -ENODEV; 3274 3275 mmc_fixup_device(card, mmc_blk_fixups); 3276 3277 card->complete_wq = alloc_workqueue("mmc_complete", 3278 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3279 if (!card->complete_wq) { 3280 pr_err("Failed to create mmc completion workqueue"); 3281 return -ENOMEM; 3282 } 3283 3284 md = mmc_blk_alloc(card); 3285 if (IS_ERR(md)) { 3286 ret = PTR_ERR(md); 3287 goto out_free; 3288 } 3289 3290 ret = mmc_blk_alloc_parts(card, md); 3291 if (ret) 3292 goto out; 3293 3294 /* Add two debugfs entries */ 3295 mmc_blk_add_debugfs(card, md); 3296 3297 pm_runtime_set_autosuspend_delay(&card->dev, 3000); 3298 pm_runtime_use_autosuspend(&card->dev); 3299 3300 /* 3301 * Don't enable runtime PM for SD-combo cards here. Leave that 3302 * decision to be taken during the SDIO init sequence instead. 3303 */ 3304 if (!mmc_card_sd_combo(card)) { 3305 pm_runtime_set_active(&card->dev); 3306 pm_runtime_enable(&card->dev); 3307 } 3308 3309 mmc_blk_rpmb_add(card); 3310 3311 return 0; 3312 3313 out: 3314 mmc_blk_remove_parts(card, md); 3315 mmc_blk_remove_req(md); 3316 out_free: 3317 destroy_workqueue(card->complete_wq); 3318 return ret; 3319 } 3320 3321 static void mmc_blk_remove(struct mmc_card *card) 3322 { 3323 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3324 3325 mmc_blk_remove_debugfs(card, md); 3326 mmc_blk_remove_parts(card, md); 3327 pm_runtime_get_sync(&card->dev); 3328 if (md->part_curr != md->part_type) { 3329 mmc_claim_host(card->host); 3330 mmc_blk_part_switch(card, md->part_type); 3331 mmc_release_host(card->host); 3332 } 3333 if (!mmc_card_sd_combo(card)) 3334 pm_runtime_disable(&card->dev); 3335 pm_runtime_put_noidle(&card->dev); 3336 mmc_blk_remove_req(md); 3337 destroy_workqueue(card->complete_wq); 3338 } 3339 3340 static int _mmc_blk_suspend(struct mmc_card *card) 3341 { 3342 struct mmc_blk_data *part_md; 3343 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3344 3345 if (md) { 3346 mmc_queue_suspend(&md->queue); 3347 list_for_each_entry(part_md, &md->part, part) { 3348 mmc_queue_suspend(&part_md->queue); 3349 } 3350 } 3351 return 0; 3352 } 3353 3354 static void mmc_blk_shutdown(struct mmc_card *card) 3355 { 3356 _mmc_blk_suspend(card); 3357 } 3358 3359 #ifdef CONFIG_PM_SLEEP 3360 static int mmc_blk_suspend(struct device *dev) 3361 { 3362 struct mmc_card *card = mmc_dev_to_card(dev); 3363 3364 return _mmc_blk_suspend(card); 3365 } 3366 3367 static int mmc_blk_resume(struct device *dev) 3368 { 3369 struct mmc_blk_data *part_md; 3370 struct mmc_blk_data *md = dev_get_drvdata(dev); 3371 3372 if (md) { 3373 /* 3374 * Resume involves the card going into idle state, 3375 * so current partition is always the main one. 
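 * Reset the cached part_curr accordingly before the queues are resumed.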
3376 */ 3377 md->part_curr = md->part_type; 3378 mmc_queue_resume(&md->queue); 3379 list_for_each_entry(part_md, &md->part, part) { 3380 mmc_queue_resume(&part_md->queue); 3381 } 3382 } 3383 return 0; 3384 } 3385 #endif 3386 3387 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); 3388 3389 static struct mmc_driver mmc_driver = { 3390 .drv = { 3391 .name = "mmcblk", 3392 .pm = &mmc_blk_pm_ops, 3393 }, 3394 .probe = mmc_blk_probe, 3395 .remove = mmc_blk_remove, 3396 .shutdown = mmc_blk_shutdown, 3397 }; 3398 3399 static int __init mmc_blk_init(void) 3400 { 3401 int res; 3402 3403 res = bus_register(&mmc_rpmb_bus_type); 3404 if (res < 0) { 3405 pr_err("mmcblk: could not register RPMB bus type\n"); 3406 return res; 3407 } 3408 res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); 3409 if (res < 0) { 3410 pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); 3411 goto out_bus_unreg; 3412 } 3413 3414 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 3415 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 3416 3417 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); 3418 3419 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3420 if (res) 3421 goto out_chrdev_unreg; 3422 3423 res = mmc_register_driver(&mmc_driver); 3424 if (res) 3425 goto out_blkdev_unreg; 3426 3427 return 0; 3428 3429 out_blkdev_unreg: 3430 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3431 out_chrdev_unreg: 3432 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3433 out_bus_unreg: 3434 bus_unregister(&mmc_rpmb_bus_type); 3435 return res; 3436 } 3437 3438 static void __exit mmc_blk_exit(void) 3439 { 3440 mmc_unregister_driver(&mmc_driver); 3441 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3442 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3443 bus_unregister(&mmc_rpmb_bus_type); 3444 } 3445 3446 module_init(mmc_blk_init); 3447 module_exit(mmc_blk_exit); 3448 3449 MODULE_LICENSE("GPL"); 3450 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 3451