1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Block driver for media (i.e., flash cards) 4 * 5 * Copyright 2002 Hewlett-Packard Company 6 * Copyright 2005-2008 Pierre Ossman 7 * 8 * Use consistent with the GNU GPL is permitted, 9 * provided that this copyright notice is 10 * preserved in its entirety in all copies and derived works. 11 * 12 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, 13 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS 14 * FITNESS FOR ANY PARTICULAR PURPOSE. 15 * 16 * Many thanks to Alessandro Rubini and Jonathan Corbet! 17 * 18 * Author: Andrew Christian 19 * 28 May 2002 20 */ 21 #include <linux/moduleparam.h> 22 #include <linux/module.h> 23 #include <linux/init.h> 24 25 #include <linux/kernel.h> 26 #include <linux/fs.h> 27 #include <linux/slab.h> 28 #include <linux/errno.h> 29 #include <linux/hdreg.h> 30 #include <linux/kdev_t.h> 31 #include <linux/kref.h> 32 #include <linux/blkdev.h> 33 #include <linux/cdev.h> 34 #include <linux/mutex.h> 35 #include <linux/scatterlist.h> 36 #include <linux/string.h> 37 #include <linux/string_helpers.h> 38 #include <linux/delay.h> 39 #include <linux/capability.h> 40 #include <linux/compat.h> 41 #include <linux/pm_runtime.h> 42 #include <linux/idr.h> 43 #include <linux/debugfs.h> 44 #include <linux/rpmb.h> 45 46 #include <linux/mmc/ioctl.h> 47 #include <linux/mmc/card.h> 48 #include <linux/mmc/host.h> 49 #include <linux/mmc/mmc.h> 50 #include <linux/mmc/sd.h> 51 52 #include <linux/uaccess.h> 53 #include <linux/unaligned.h> 54 55 #include "queue.h" 56 #include "block.h" 57 #include "core.h" 58 #include "card.h" 59 #include "crypto.h" 60 #include "host.h" 61 #include "bus.h" 62 #include "mmc_ops.h" 63 #include "quirks.h" 64 #include "sd_ops.h" 65 66 MODULE_ALIAS("mmc:block"); 67 #ifdef MODULE_PARAM_PREFIX 68 #undef MODULE_PARAM_PREFIX 69 #endif 70 #define MODULE_PARAM_PREFIX "mmcblk." 71 72 /* 73 * Set a 10 second timeout for polling write request busy state. Note, mmc core 74 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 75 * second software timer to timeout the whole request, so 10 seconds should be 76 * ample. 77 */ 78 #define MMC_BLK_TIMEOUT_MS (10 * 1000) 79 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 80 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) 81 82 /** 83 * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51) 84 * 85 * @stuff : stuff bytes 86 * @key_mac : The authentication key or the message authentication 87 * code (MAC) depending on the request/response type. 88 * The MAC will be delivered in the last (or the only) 89 * block of data. 90 * @data : Data to be written or read by signed access. 91 * @nonce : Random number generated by the host for the requests 92 * and copied to the response by the RPMB engine. 93 * @write_counter: Counter value for the total amount of the successful 94 * authenticated data write requests made by the host. 95 * @addr : Address of the data to be programmed to or read 96 * from the RPMB. Address is the serial number of 97 * the accessed block (half sector 256B). 98 * @block_count : Number of blocks (half sectors, 256B) requested to be 99 * read/programmed. 100 * @result : Includes information about the status of the write counter 101 * (valid, expired) and result of the access made to the RPMB. 102 * @req_resp : Defines the type of request and response to/from the memory. 103 * 104 * The stuff bytes and big-endian properties are modeled to fit to the spec. 
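 *
 * As a worked example of the layout: the field sizes above add up to
 * 196 + 32 + 256 + 16 + 4 + 2 + 2 + 2 + 2 = 512 bytes, i.e. exactly one
 * frame per 512-byte block, and every multi-byte field is big-endian.
 * A request frame carries one of the RPMB_* request types below in
 * @req_resp; per the spec the matching response moves that type into the
 * high byte (e.g. 0x0004 for a data read request, 0x0400 for its response).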
 */
struct rpmb_frame {
	u8     stuff[196];
	u8     key_mac[32];
	u8     data[256];
	u8     nonce[16];
	__be32 write_counter;
	__be16 addr;
	__be16 block_count;
	__be16 result;
	__be16 req_resp;
} __packed;

#define RPMB_PROGRAM_KEY       0x1    /* Program RPMB Authentication Key */
#define RPMB_GET_WRITE_COUNTER 0x2    /* Read RPMB write counter */
#define RPMB_WRITE_DATA        0x3    /* Write data to RPMB partition */
#define RPMB_READ_DATA         0x4    /* Read data from RPMB partition */
#define RPMB_RESULT_READ       0x5    /* Read result request (Internal) */

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in the main mmc_blk_data associated with the
	 * mmc_card via dev_set_drvdata, and keeps track of the
	 * currently selected device partition.
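	 * For eMMC this mirrors the 3-bit PARTITION_ACCESS field of
	 * PARTITION_CONFIG: e.g. 0x0 selects the user area, 0x1/0x2 the
	 * boot partitions, 0x3 RPMB and 0x4..0x7 the general purpose
	 * partitions.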
178 */ 179 unsigned int part_curr; 180 #define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */ 181 int area_type; 182 183 /* debugfs files (only in main mmc_blk_data) */ 184 struct dentry *status_dentry; 185 struct dentry *ext_csd_dentry; 186 }; 187 188 /* Device type for RPMB character devices */ 189 static dev_t mmc_rpmb_devt; 190 191 /* Bus type for RPMB character devices */ 192 static const struct bus_type mmc_rpmb_bus_type = { 193 .name = "mmc_rpmb", 194 }; 195 196 /** 197 * struct mmc_rpmb_data - special RPMB device type for these areas 198 * @dev: the device for the RPMB area 199 * @chrdev: character device for the RPMB area 200 * @id: unique device ID number 201 * @part_index: partition index (0 on first) 202 * @md: parent MMC block device 203 * @rdev: registered RPMB device 204 * @node: list item, so we can put this device on a list 205 */ 206 struct mmc_rpmb_data { 207 struct device dev; 208 struct cdev chrdev; 209 int id; 210 unsigned int part_index; 211 struct mmc_blk_data *md; 212 struct rpmb_dev *rdev; 213 struct list_head node; 214 }; 215 216 static DEFINE_MUTEX(open_lock); 217 218 module_param(perdev_minors, int, 0444); 219 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 220 221 static inline int mmc_blk_part_switch(struct mmc_card *card, 222 unsigned int part_type); 223 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 224 struct mmc_card *card, 225 int recovery_mode, 226 struct mmc_queue *mq); 227 static void mmc_blk_hsq_req_done(struct mmc_request *mrq); 228 static int mmc_spi_err_check(struct mmc_card *card); 229 static int mmc_blk_busy_cb(void *cb_data, bool *busy); 230 231 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) 232 { 233 struct mmc_blk_data *md; 234 235 mutex_lock(&open_lock); 236 md = disk->private_data; 237 if (md && !kref_get_unless_zero(&md->kref)) 238 md = NULL; 239 mutex_unlock(&open_lock); 240 241 return md; 242 } 243 244 static inline int mmc_get_devidx(struct gendisk *disk) 245 { 246 int devidx = disk->first_minor / perdev_minors; 247 return devidx; 248 } 249 250 static void mmc_blk_kref_release(struct kref *ref) 251 { 252 struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref); 253 int devidx; 254 255 devidx = mmc_get_devidx(md->disk); 256 ida_free(&mmc_blk_ida, devidx); 257 258 mutex_lock(&open_lock); 259 md->disk->private_data = NULL; 260 mutex_unlock(&open_lock); 261 262 put_disk(md->disk); 263 kfree(md); 264 } 265 266 static void mmc_blk_put(struct mmc_blk_data *md) 267 { 268 kref_put(&md->kref, mmc_blk_kref_release); 269 } 270 271 static ssize_t power_ro_lock_show(struct device *dev, 272 struct device_attribute *attr, char *buf) 273 { 274 int ret; 275 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 276 struct mmc_card *card = md->queue.card; 277 int locked = 0; 278 279 if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) 280 locked = 2; 281 else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) 282 locked = 1; 283 284 ret = sysfs_emit(buf, "%d\n", locked); 285 286 mmc_blk_put(md); 287 288 return ret; 289 } 290 291 static ssize_t power_ro_lock_store(struct device *dev, 292 struct device_attribute *attr, const char *buf, size_t count) 293 { 294 int ret; 295 struct mmc_blk_data *md, *part_md; 296 struct mmc_queue *mq; 297 struct request *req; 298 unsigned long set; 299 300 if (kstrtoul(buf, 0, &set)) 301 return -EINVAL; 302 303 if (set != 1) 304 return count; 305 306 md = mmc_blk_get(dev_to_disk(dev)); 307 mq = &md->queue; 308 309 /* 
Dispatch locking to the block layer */ 310 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0); 311 if (IS_ERR(req)) { 312 count = PTR_ERR(req); 313 goto out_put; 314 } 315 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; 316 req_to_mmc_queue_req(req)->drv_op_result = -EIO; 317 blk_execute_rq(req, false); 318 ret = req_to_mmc_queue_req(req)->drv_op_result; 319 blk_mq_free_request(req); 320 321 if (!ret) { 322 pr_info("%s: Locking boot partition ro until next power on\n", 323 md->disk->disk_name); 324 set_disk_ro(md->disk, 1); 325 326 list_for_each_entry(part_md, &md->part, part) 327 if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { 328 pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); 329 set_disk_ro(part_md->disk, 1); 330 } 331 } 332 out_put: 333 mmc_blk_put(md); 334 return count; 335 } 336 337 static DEVICE_ATTR(ro_lock_until_next_power_on, 0, 338 power_ro_lock_show, power_ro_lock_store); 339 340 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, 341 char *buf) 342 { 343 int ret; 344 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 345 346 ret = sysfs_emit(buf, "%d\n", 347 get_disk_ro(dev_to_disk(dev)) ^ 348 md->read_only); 349 mmc_blk_put(md); 350 return ret; 351 } 352 353 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, 354 const char *buf, size_t count) 355 { 356 int ret; 357 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 358 unsigned long set; 359 360 if (kstrtoul(buf, 0, &set)) { 361 ret = -EINVAL; 362 goto out; 363 } 364 365 set_disk_ro(dev_to_disk(dev), set || md->read_only); 366 ret = count; 367 out: 368 mmc_blk_put(md); 369 return ret; 370 } 371 372 static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store); 373 374 static struct attribute *mmc_disk_attrs[] = { 375 &dev_attr_force_ro.attr, 376 &dev_attr_ro_lock_until_next_power_on.attr, 377 NULL, 378 }; 379 380 static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj, 381 struct attribute *a, int n) 382 { 383 struct device *dev = kobj_to_dev(kobj); 384 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); 385 umode_t mode = a->mode; 386 387 if (a == &dev_attr_ro_lock_until_next_power_on.attr && 388 (md->area_type & MMC_BLK_DATA_AREA_BOOT) && 389 md->queue.card->ext_csd.boot_ro_lockable) { 390 mode = S_IRUGO; 391 if (!(md->queue.card->ext_csd.boot_ro_lock & 392 EXT_CSD_BOOT_WP_B_PWR_WP_DIS)) 393 mode |= S_IWUSR; 394 } 395 396 mmc_blk_put(md); 397 return mode; 398 } 399 400 static const struct attribute_group mmc_disk_attr_group = { 401 .is_visible = mmc_disk_attrs_is_visible, 402 .attrs = mmc_disk_attrs, 403 }; 404 405 static const struct attribute_group *mmc_disk_attr_groups[] = { 406 &mmc_disk_attr_group, 407 NULL, 408 }; 409 410 static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode) 411 { 412 struct mmc_blk_data *md = mmc_blk_get(disk); 413 int ret = -ENXIO; 414 415 mutex_lock(&block_mutex); 416 if (md) { 417 ret = 0; 418 if ((mode & BLK_OPEN_WRITE) && md->read_only) { 419 mmc_blk_put(md); 420 ret = -EROFS; 421 } 422 } 423 mutex_unlock(&block_mutex); 424 425 return ret; 426 } 427 428 static void mmc_blk_release(struct gendisk *disk) 429 { 430 struct mmc_blk_data *md = disk->private_data; 431 432 mutex_lock(&block_mutex); 433 mmc_blk_put(md); 434 mutex_unlock(&block_mutex); 435 } 436 437 static int 438 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) 439 { 440 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); 441 geo->heads = 4; 442 geo->sectors = 
16; 443 return 0; 444 } 445 446 struct mmc_blk_ioc_data { 447 struct mmc_ioc_cmd ic; 448 unsigned char *buf; 449 u64 buf_bytes; 450 unsigned int flags; 451 #define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */ 452 #define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */ 453 454 struct mmc_rpmb_data *rpmb; 455 }; 456 457 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( 458 struct mmc_ioc_cmd __user *user) 459 { 460 struct mmc_blk_ioc_data *idata; 461 int err; 462 463 idata = kzalloc(sizeof(*idata), GFP_KERNEL); 464 if (!idata) { 465 err = -ENOMEM; 466 goto out; 467 } 468 469 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { 470 err = -EFAULT; 471 goto idata_err; 472 } 473 474 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; 475 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { 476 err = -EOVERFLOW; 477 goto idata_err; 478 } 479 480 if (!idata->buf_bytes) { 481 idata->buf = NULL; 482 return idata; 483 } 484 485 idata->buf = memdup_user((void __user *)(unsigned long) 486 idata->ic.data_ptr, idata->buf_bytes); 487 if (IS_ERR(idata->buf)) { 488 err = PTR_ERR(idata->buf); 489 goto idata_err; 490 } 491 492 return idata; 493 494 idata_err: 495 kfree(idata); 496 out: 497 return ERR_PTR(err); 498 } 499 500 static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, 501 struct mmc_blk_ioc_data *idata) 502 { 503 struct mmc_ioc_cmd *ic = &idata->ic; 504 505 if (copy_to_user(&(ic_ptr->response), ic->response, 506 sizeof(ic->response))) 507 return -EFAULT; 508 509 if (!idata->ic.write_flag) { 510 if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, 511 idata->buf, idata->buf_bytes)) 512 return -EFAULT; 513 } 514 515 return 0; 516 } 517 518 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, 519 struct mmc_blk_ioc_data **idatas, int i) 520 { 521 struct mmc_command cmd = {}, sbc = {}; 522 struct mmc_data data = {}; 523 struct mmc_request mrq = {}; 524 struct scatterlist sg; 525 bool r1b_resp; 526 unsigned int busy_timeout_ms; 527 int err; 528 unsigned int target_part; 529 struct mmc_blk_ioc_data *idata = idatas[i]; 530 struct mmc_blk_ioc_data *prev_idata = NULL; 531 532 if (!card || !md || !idata) 533 return -EINVAL; 534 535 if (idata->flags & MMC_BLK_IOC_DROP) 536 return 0; 537 538 if (idata->flags & MMC_BLK_IOC_SBC && i > 0) 539 prev_idata = idatas[i - 1]; 540 541 /* 542 * The RPMB accesses comes in from the character device, so we 543 * need to target these explicitly. Else we just target the 544 * partition type for the block device the ioctl() was issued 545 * on. 546 */ 547 if (idata->rpmb) { 548 /* Support multiple RPMB partitions */ 549 target_part = idata->rpmb->part_index; 550 target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; 551 } else { 552 target_part = md->part_type; 553 } 554 555 cmd.opcode = idata->ic.opcode; 556 cmd.arg = idata->ic.arg; 557 cmd.flags = idata->ic.flags; 558 559 if (idata->buf_bytes) { 560 data.sg = &sg; 561 data.sg_len = 1; 562 data.blksz = idata->ic.blksz; 563 data.blocks = idata->ic.blocks; 564 565 sg_init_one(data.sg, idata->buf, idata->buf_bytes); 566 567 if (idata->ic.write_flag) 568 data.flags = MMC_DATA_WRITE; 569 else 570 data.flags = MMC_DATA_READ; 571 572 /* data.flags must already be set before doing this. */ 573 mmc_set_data_timeout(&data, card); 574 575 /* Allow overriding the timeout_ns for empirical tuning. 
*/ 576 if (idata->ic.data_timeout_ns) 577 data.timeout_ns = idata->ic.data_timeout_ns; 578 579 mrq.data = &data; 580 } 581 582 mrq.cmd = &cmd; 583 584 err = mmc_blk_part_switch(card, target_part); 585 if (err) 586 return err; 587 588 if (idata->ic.is_acmd) { 589 err = mmc_app_cmd(card->host, card); 590 if (err) 591 return err; 592 } 593 594 if (idata->rpmb || prev_idata) { 595 sbc.opcode = MMC_SET_BLOCK_COUNT; 596 /* 597 * We don't do any blockcount validation because the max size 598 * may be increased by a future standard. We just copy the 599 * 'Reliable Write' bit here. 600 */ 601 sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); 602 if (prev_idata) 603 sbc.arg = prev_idata->ic.arg; 604 sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 605 mrq.sbc = &sbc; 606 } 607 608 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && 609 (cmd.opcode == MMC_SWITCH)) 610 return mmc_sanitize(card, idata->ic.cmd_timeout_ms); 611 612 /* If it's an R1B response we need some more preparations. */ 613 busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS; 614 r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B; 615 if (r1b_resp) 616 mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms); 617 618 mmc_wait_for_req(card->host, &mrq); 619 memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); 620 621 if (prev_idata) { 622 memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp)); 623 if (sbc.error) { 624 dev_err(mmc_dev(card->host), "%s: sbc error %d\n", 625 __func__, sbc.error); 626 return sbc.error; 627 } 628 } 629 630 if (cmd.error) { 631 dev_err(mmc_dev(card->host), "%s: cmd error %d\n", 632 __func__, cmd.error); 633 return cmd.error; 634 } 635 if (data.error) { 636 dev_err(mmc_dev(card->host), "%s: data error %d\n", 637 __func__, data.error); 638 return data.error; 639 } 640 641 /* 642 * Make sure the cache of the PARTITION_CONFIG register and 643 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write 644 * changed it successfully. 645 */ 646 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && 647 (cmd.opcode == MMC_SWITCH)) { 648 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); 649 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); 650 651 /* 652 * Update cache so the next mmc_blk_part_switch call operates 653 * on up-to-date data. 654 */ 655 card->ext_csd.part_config = value; 656 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; 657 } 658 659 /* 660 * Make sure to update CACHE_CTRL in case it was changed. The cache 661 * will get turned back on if the card is re-initialized, e.g. 662 * suspend/resume or hw reset in recovery. 663 */ 664 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) && 665 (cmd.opcode == MMC_SWITCH)) { 666 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1; 667 668 card->ext_csd.cache_ctrl = value; 669 } 670 671 /* 672 * According to the SD specs, some commands require a delay after 673 * issuing the command. 674 */ 675 if (idata->ic.postsleep_min_us) 676 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); 677 678 if (mmc_host_is_spi(card->host)) { 679 if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY) 680 return mmc_spi_err_check(card); 681 return err; 682 } 683 684 /* 685 * Ensure RPMB, writes and R1B responses are completed by polling with 686 * CMD13. Note that, usually we don't need to poll when using HW busy 687 * detection, but here it's needed since some commands may indicate the 688 * error through the R1 status bits. 
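	 * For example, an MMC_SWITCH (CMD6) issued through this ioctl carries
	 * an R1B response; a failure such as R1_SWITCH_ERROR may only show up
	 * in a later CMD13 status, which the polling below accumulates and
	 * hands back to userspace in response[0].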
689 */ 690 if (idata->rpmb || idata->ic.write_flag || r1b_resp) { 691 struct mmc_blk_busy_data cb_data = { 692 .card = card, 693 }; 694 695 err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms, 696 &mmc_blk_busy_cb, &cb_data); 697 698 idata->ic.response[0] = cb_data.status; 699 } 700 701 return err; 702 } 703 704 static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, 705 struct mmc_ioc_cmd __user *ic_ptr, 706 struct mmc_rpmb_data *rpmb) 707 { 708 struct mmc_blk_ioc_data *idata; 709 struct mmc_blk_ioc_data *idatas[1]; 710 struct mmc_queue *mq; 711 struct mmc_card *card; 712 int err = 0, ioc_err = 0; 713 struct request *req; 714 715 idata = mmc_blk_ioctl_copy_from_user(ic_ptr); 716 if (IS_ERR(idata)) 717 return PTR_ERR(idata); 718 /* This will be NULL on non-RPMB ioctl():s */ 719 idata->rpmb = rpmb; 720 721 card = md->queue.card; 722 if (IS_ERR(card)) { 723 err = PTR_ERR(card); 724 goto cmd_done; 725 } 726 727 /* 728 * Dispatch the ioctl() into the block request queue. 729 */ 730 mq = &md->queue; 731 req = blk_mq_alloc_request(mq->queue, 732 idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 733 if (IS_ERR(req)) { 734 err = PTR_ERR(req); 735 goto cmd_done; 736 } 737 idatas[0] = idata; 738 req_to_mmc_queue_req(req)->drv_op = 739 rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 740 req_to_mmc_queue_req(req)->drv_op_result = -EIO; 741 req_to_mmc_queue_req(req)->drv_op_data = idatas; 742 req_to_mmc_queue_req(req)->ioc_count = 1; 743 blk_execute_rq(req, false); 744 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 745 err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); 746 blk_mq_free_request(req); 747 748 cmd_done: 749 kfree(idata->buf); 750 kfree(idata); 751 return ioc_err ? ioc_err : err; 752 } 753 754 static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, 755 struct mmc_ioc_multi_cmd __user *user, 756 struct mmc_rpmb_data *rpmb) 757 { 758 struct mmc_blk_ioc_data **idata = NULL; 759 struct mmc_ioc_cmd __user *cmds = user->cmds; 760 struct mmc_card *card; 761 struct mmc_queue *mq; 762 int err = 0, ioc_err = 0; 763 __u64 num_of_cmds; 764 unsigned int i, n; 765 struct request *req; 766 767 if (copy_from_user(&num_of_cmds, &user->num_of_cmds, 768 sizeof(num_of_cmds))) 769 return -EFAULT; 770 771 if (!num_of_cmds) 772 return 0; 773 774 if (num_of_cmds > MMC_IOC_MAX_CMDS) 775 return -EINVAL; 776 777 n = num_of_cmds; 778 idata = kcalloc(n, sizeof(*idata), GFP_KERNEL); 779 if (!idata) 780 return -ENOMEM; 781 782 for (i = 0; i < n; i++) { 783 idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); 784 if (IS_ERR(idata[i])) { 785 err = PTR_ERR(idata[i]); 786 n = i; 787 goto cmd_err; 788 } 789 /* This will be NULL on non-RPMB ioctl():s */ 790 idata[i]->rpmb = rpmb; 791 } 792 793 card = md->queue.card; 794 if (IS_ERR(card)) { 795 err = PTR_ERR(card); 796 goto cmd_err; 797 } 798 799 800 /* 801 * Dispatch the ioctl()s into the block request queue. 802 */ 803 mq = &md->queue; 804 req = blk_mq_alloc_request(mq->queue, 805 idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 806 if (IS_ERR(req)) { 807 err = PTR_ERR(req); 808 goto cmd_err; 809 } 810 req_to_mmc_queue_req(req)->drv_op = 811 rpmb ? 
MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; 812 req_to_mmc_queue_req(req)->drv_op_result = -EIO; 813 req_to_mmc_queue_req(req)->drv_op_data = idata; 814 req_to_mmc_queue_req(req)->ioc_count = n; 815 blk_execute_rq(req, false); 816 ioc_err = req_to_mmc_queue_req(req)->drv_op_result; 817 818 /* copy to user if data and response */ 819 for (i = 0; i < n && !err; i++) 820 err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]); 821 822 blk_mq_free_request(req); 823 824 cmd_err: 825 for (i = 0; i < n; i++) { 826 kfree(idata[i]->buf); 827 kfree(idata[i]); 828 } 829 kfree(idata); 830 return ioc_err ? ioc_err : err; 831 } 832 833 static int mmc_blk_check_blkdev(struct block_device *bdev) 834 { 835 /* 836 * The caller must have CAP_SYS_RAWIO, and must be calling this on the 837 * whole block device, not on a partition. This prevents overspray 838 * between sibling partitions. 839 */ 840 if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev)) 841 return -EPERM; 842 return 0; 843 } 844 845 static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode, 846 unsigned int cmd, unsigned long arg) 847 { 848 struct mmc_blk_data *md; 849 int ret; 850 851 switch (cmd) { 852 case MMC_IOC_CMD: 853 ret = mmc_blk_check_blkdev(bdev); 854 if (ret) 855 return ret; 856 md = mmc_blk_get(bdev->bd_disk); 857 if (!md) 858 return -EINVAL; 859 ret = mmc_blk_ioctl_cmd(md, 860 (struct mmc_ioc_cmd __user *)arg, 861 NULL); 862 mmc_blk_put(md); 863 return ret; 864 case MMC_IOC_MULTI_CMD: 865 ret = mmc_blk_check_blkdev(bdev); 866 if (ret) 867 return ret; 868 md = mmc_blk_get(bdev->bd_disk); 869 if (!md) 870 return -EINVAL; 871 ret = mmc_blk_ioctl_multi_cmd(md, 872 (struct mmc_ioc_multi_cmd __user *)arg, 873 NULL); 874 mmc_blk_put(md); 875 return ret; 876 default: 877 return -EINVAL; 878 } 879 } 880 881 #ifdef CONFIG_COMPAT 882 static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode, 883 unsigned int cmd, unsigned long arg) 884 { 885 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); 886 } 887 #endif 888 889 static int mmc_blk_alternative_gpt_sector(struct gendisk *disk, 890 sector_t *sector) 891 { 892 struct mmc_blk_data *md; 893 int ret; 894 895 md = mmc_blk_get(disk); 896 if (!md) 897 return -EINVAL; 898 899 if (md->queue.card) 900 ret = mmc_card_alternative_gpt_sector(md->queue.card, sector); 901 else 902 ret = -ENODEV; 903 904 mmc_blk_put(md); 905 906 return ret; 907 } 908 909 static const struct block_device_operations mmc_bdops = { 910 .open = mmc_blk_open, 911 .release = mmc_blk_release, 912 .getgeo = mmc_blk_getgeo, 913 .owner = THIS_MODULE, 914 .ioctl = mmc_blk_ioctl, 915 #ifdef CONFIG_COMPAT 916 .compat_ioctl = mmc_blk_compat_ioctl, 917 #endif 918 .alternative_gpt_sector = mmc_blk_alternative_gpt_sector, 919 }; 920 921 static int mmc_blk_part_switch_pre(struct mmc_card *card, 922 unsigned int part_type) 923 { 924 const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; 925 const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; 926 int ret = 0; 927 928 if ((part_type & mask) == rpmb) { 929 if (card->ext_csd.cmdq_en) { 930 ret = mmc_cmdq_disable(card); 931 if (ret) 932 return ret; 933 } 934 mmc_retune_pause(card->host); 935 } 936 937 return ret; 938 } 939 940 static int mmc_blk_part_switch_post(struct mmc_card *card, 941 unsigned int part_type) 942 { 943 const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; 944 const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; 945 int ret = 0; 946 947 if ((part_type & mask) == rpmb) { 948 mmc_retune_unpause(card->host); 949 if 
(card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;
	u8 resp_sz = mmc_card_ult_capacity(card) ? 8 : 4;
	unsigned int noio_flag;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = resp_sz;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	noio_flag = memalloc_noio_save();
	blocks = kmalloc(resp_sz, GFP_KERNEL);
	memalloc_noio_restore(noio_flag);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, resp_sz);

	mmc_wait_for_req(card->host, &mrq);

	if (mmc_card_ult_capacity(card)) {
		/*
		 * Normally, ACMD22 returns the number of written sectors as
		 * u32. SDUC, however, returns it as u64. This is not a
		 * superfluous requirement, because SDUC writes may exceed 2TB.
		 * For Linux mmc, however, the preceding write cannot exceed
		 * the block layer limits, so just make room for a u64 and
		 * cast the response back to u32.
		 */
		result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
	} else {
		result = ntohl(*blocks);
	}
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2.
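	 * e.g. ios.clock = 50 MHz is reported as 25000 kHz here; at worst the
	 * card clock is underestimated, which only lengthens the timeout
	 * computed in mmc_blk_data_timeout_ms().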
*/ 1062 if (host->ios.clock) 1063 return host->ios.clock / 2000; 1064 1065 /* How can there be no clock */ 1066 WARN_ON_ONCE(1); 1067 return 100; /* 100 kHz is minimum possible value */ 1068 } 1069 1070 static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, 1071 struct mmc_data *data) 1072 { 1073 unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); 1074 unsigned int khz; 1075 1076 if (data->timeout_clks) { 1077 khz = mmc_blk_clock_khz(host); 1078 ms += DIV_ROUND_UP(data->timeout_clks, khz); 1079 } 1080 1081 return ms; 1082 } 1083 1084 /* 1085 * Attempts to reset the card and get back to the requested partition. 1086 * Therefore any error here must result in cancelling the block layer 1087 * request, it must not be reattempted without going through the mmc_blk 1088 * partition sanity checks. 1089 */ 1090 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, 1091 int type) 1092 { 1093 int err; 1094 struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); 1095 1096 if (md->reset_done & type) 1097 return -EEXIST; 1098 1099 md->reset_done |= type; 1100 err = mmc_hw_reset(host->card); 1101 /* 1102 * A successful reset will leave the card in the main partition, but 1103 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID 1104 * in that case. 1105 */ 1106 main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type; 1107 if (err) 1108 return err; 1109 /* Ensure we switch back to the correct partition */ 1110 if (mmc_blk_part_switch(host->card, md->part_type)) 1111 /* 1112 * We have failed to get back into the correct 1113 * partition, so we need to abort the whole request. 1114 */ 1115 return -ENODEV; 1116 return 0; 1117 } 1118 1119 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) 1120 { 1121 md->reset_done &= ~type; 1122 } 1123 1124 static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq) 1125 { 1126 struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data; 1127 int i; 1128 1129 for (i = 1; i < mq_rq->ioc_count; i++) { 1130 if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT && 1131 mmc_op_multi(idata[i]->ic.opcode)) { 1132 idata[i - 1]->flags |= MMC_BLK_IOC_DROP; 1133 idata[i]->flags |= MMC_BLK_IOC_SBC; 1134 } 1135 } 1136 } 1137 1138 /* 1139 * The non-block commands come back from the block layer after it queued it and 1140 * processed it with all other requests and then they get issued in this 1141 * function. 
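 *
 * As an illustrative sketch (not part of this driver), a userspace EXT_CSD
 * read lands here as MMC_DRV_OP_IOCTL roughly like this; fd is assumed to be
 * an open /dev/mmcblkN node and error handling is omitted (see mmc-utils for
 * a complete implementation):
 *
 *	u8 ext_csd[512];
 *	struct mmc_ioc_cmd ic = {};
 *
 *	ic.opcode = MMC_SEND_EXT_CSD;	// CMD8 for eMMC
 *	ic.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz = 512;
 *	ic.blocks = 1;
 *	mmc_ioc_cmd_set_data(ic, ext_csd);
 *	ioctl(fd, MMC_IOC_CMD, &ic);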
1142 */ 1143 static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) 1144 { 1145 struct mmc_queue_req *mq_rq; 1146 struct mmc_card *card = mq->card; 1147 struct mmc_blk_data *md = mq->blkdata; 1148 struct mmc_blk_ioc_data **idata; 1149 bool rpmb_ioctl; 1150 u8 **ext_csd; 1151 u32 status; 1152 int ret; 1153 int i; 1154 1155 mq_rq = req_to_mmc_queue_req(req); 1156 rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); 1157 1158 switch (mq_rq->drv_op) { 1159 case MMC_DRV_OP_IOCTL: 1160 if (card->ext_csd.cmdq_en) { 1161 ret = mmc_cmdq_disable(card); 1162 if (ret) 1163 break; 1164 } 1165 1166 mmc_blk_check_sbc(mq_rq); 1167 1168 fallthrough; 1169 case MMC_DRV_OP_IOCTL_RPMB: 1170 idata = mq_rq->drv_op_data; 1171 for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { 1172 ret = __mmc_blk_ioctl_cmd(card, md, idata, i); 1173 if (ret) 1174 break; 1175 } 1176 /* Always switch back to main area after RPMB access */ 1177 if (rpmb_ioctl) 1178 mmc_blk_part_switch(card, 0); 1179 else if (card->reenable_cmdq && !card->ext_csd.cmdq_en) 1180 mmc_cmdq_enable(card); 1181 break; 1182 case MMC_DRV_OP_BOOT_WP: 1183 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1184 card->ext_csd.boot_ro_lock | 1185 EXT_CSD_BOOT_WP_B_PWR_WP_EN, 1186 card->ext_csd.part_time); 1187 if (ret) 1188 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", 1189 md->disk->disk_name, ret); 1190 else 1191 card->ext_csd.boot_ro_lock |= 1192 EXT_CSD_BOOT_WP_B_PWR_WP_EN; 1193 break; 1194 case MMC_DRV_OP_GET_CARD_STATUS: 1195 ret = mmc_send_status(card, &status); 1196 if (!ret) 1197 ret = status; 1198 break; 1199 case MMC_DRV_OP_GET_EXT_CSD: 1200 ext_csd = mq_rq->drv_op_data; 1201 ret = mmc_get_ext_csd(card, ext_csd); 1202 break; 1203 default: 1204 pr_err("%s: unknown driver specific operation\n", 1205 md->disk->disk_name); 1206 ret = -EINVAL; 1207 break; 1208 } 1209 mq_rq->drv_op_result = ret; 1210 blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); 1211 } 1212 1213 static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req, 1214 int type, unsigned int erase_arg) 1215 { 1216 struct mmc_blk_data *md = mq->blkdata; 1217 struct mmc_card *card = md->queue.card; 1218 unsigned int nr; 1219 sector_t from; 1220 int err = 0; 1221 blk_status_t status = BLK_STS_OK; 1222 1223 if (!mmc_can_erase(card)) { 1224 status = BLK_STS_NOTSUPP; 1225 goto fail; 1226 } 1227 1228 from = blk_rq_pos(req); 1229 nr = blk_rq_sectors(req); 1230 1231 do { 1232 err = 0; 1233 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1234 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1235 INAND_CMD38_ARG_EXT_CSD, 1236 erase_arg == MMC_TRIM_ARG ? 
1237 INAND_CMD38_ARG_TRIM : 1238 INAND_CMD38_ARG_ERASE, 1239 card->ext_csd.generic_cmd6_time); 1240 } 1241 if (!err) 1242 err = mmc_erase(card, from, nr, erase_arg); 1243 } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); 1244 if (err) 1245 status = BLK_STS_IOERR; 1246 else 1247 mmc_blk_reset_success(md, type); 1248 fail: 1249 blk_mq_end_request(req, status); 1250 } 1251 1252 static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req) 1253 { 1254 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG); 1255 } 1256 1257 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 1258 { 1259 struct mmc_blk_data *md = mq->blkdata; 1260 struct mmc_card *card = md->queue.card; 1261 unsigned int arg = card->erase_arg; 1262 1263 if (mmc_card_broken_sd_discard(card)) 1264 arg = SD_ERASE_ARG; 1265 1266 mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg); 1267 } 1268 1269 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, 1270 struct request *req) 1271 { 1272 struct mmc_blk_data *md = mq->blkdata; 1273 struct mmc_card *card = md->queue.card; 1274 unsigned int nr, arg; 1275 sector_t from; 1276 int err = 0, type = MMC_BLK_SECDISCARD; 1277 blk_status_t status = BLK_STS_OK; 1278 1279 if (!(mmc_can_secure_erase_trim(card))) { 1280 status = BLK_STS_NOTSUPP; 1281 goto out; 1282 } 1283 1284 from = blk_rq_pos(req); 1285 nr = blk_rq_sectors(req); 1286 1287 if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) 1288 arg = MMC_SECURE_TRIM1_ARG; 1289 else 1290 arg = MMC_SECURE_ERASE_ARG; 1291 1292 retry: 1293 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1294 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1295 INAND_CMD38_ARG_EXT_CSD, 1296 arg == MMC_SECURE_TRIM1_ARG ? 1297 INAND_CMD38_ARG_SECTRIM1 : 1298 INAND_CMD38_ARG_SECERASE, 1299 card->ext_csd.generic_cmd6_time); 1300 if (err) 1301 goto out_retry; 1302 } 1303 1304 err = mmc_erase(card, from, nr, arg); 1305 if (err == -EIO) 1306 goto out_retry; 1307 if (err) { 1308 status = BLK_STS_IOERR; 1309 goto out; 1310 } 1311 1312 if (arg == MMC_SECURE_TRIM1_ARG) { 1313 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 1314 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1315 INAND_CMD38_ARG_EXT_CSD, 1316 INAND_CMD38_ARG_SECTRIM2, 1317 card->ext_csd.generic_cmd6_time); 1318 if (err) 1319 goto out_retry; 1320 } 1321 1322 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 1323 if (err == -EIO) 1324 goto out_retry; 1325 if (err) { 1326 status = BLK_STS_IOERR; 1327 goto out; 1328 } 1329 } 1330 1331 out_retry: 1332 if (err && !mmc_blk_reset(md, card->host, type)) 1333 goto retry; 1334 if (!err) 1335 mmc_blk_reset_success(md, type); 1336 out: 1337 blk_mq_end_request(req, status); 1338 } 1339 1340 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) 1341 { 1342 struct mmc_blk_data *md = mq->blkdata; 1343 struct mmc_card *card = md->queue.card; 1344 int ret = 0; 1345 1346 ret = mmc_flush_cache(card->host); 1347 blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); 1348 } 1349 1350 /* 1351 * Reformat current write as a reliable write, supporting 1352 * both legacy and the enhanced reliable write MMC cards. 1353 * In each transfer we'll handle only as much as a single 1354 * reliable write can handle, thus finish the request in 1355 * partial completions. 
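 *
 * For example, on a legacy card advertising rel_sectors = 8, an aligned
 * 64-sector FUA write is carved into eight 8-sector reliable writes; each
 * pass completes partially and the block layer resubmits the rest.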
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10)[1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC has a similar statement
	 * in section 6.8.3.
	 *
	 * A multiple block read/write can be done either by the predefined
	 * method, namely CMD23, or in open-ended mode. For open-ended mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However, the spec[1] doesn't tell us whether we should also
	 * ignore it for the predefined method. But per the spec[1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we can expect an out of range error
	 * in the response to the following CMD18/25. And if the argument of
	 * CMD23 plus the argument of CMD18/25 exceeds the max number of
	 * blocks, we can also expect -ETIMEDOUT or some other error from
	 * the host driver due to a missing data response (for write) or
	 * data (for read), as the card will stop the data transfer by
	 * itself per the spec. So we only need to check R1_OUT_OF_RANGE
	 * for open-ended mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
1440 */ 1441 do_rel_wr = (req->cmd_flags & REQ_FUA) && 1442 rq_data_dir(req) == WRITE && 1443 (md->flags & MMC_BLK_REL_WR); 1444 1445 memset(brq, 0, sizeof(struct mmc_blk_request)); 1446 1447 mmc_crypto_prepare_req(mqrq); 1448 1449 brq->mrq.data = &brq->data; 1450 brq->mrq.tag = req->tag; 1451 1452 brq->stop.opcode = MMC_STOP_TRANSMISSION; 1453 brq->stop.arg = 0; 1454 1455 if (rq_data_dir(req) == READ) { 1456 brq->data.flags = MMC_DATA_READ; 1457 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1458 } else { 1459 brq->data.flags = MMC_DATA_WRITE; 1460 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1461 } 1462 1463 brq->data.blksz = 512; 1464 brq->data.blocks = blk_rq_sectors(req); 1465 brq->data.blk_addr = blk_rq_pos(req); 1466 1467 /* 1468 * The command queue supports 2 priorities: "high" (1) and "simple" (0). 1469 * The eMMC will give "high" priority tasks priority over "simple" 1470 * priority tasks. Here we always set "simple" priority by not setting 1471 * MMC_DATA_PRIO. 1472 */ 1473 1474 /* 1475 * The block layer doesn't support all sector count 1476 * restrictions, so we need to be prepared for too big 1477 * requests. 1478 */ 1479 if (brq->data.blocks > card->host->max_blk_count) 1480 brq->data.blocks = card->host->max_blk_count; 1481 1482 if (brq->data.blocks > 1) { 1483 /* 1484 * Some SD cards in SPI mode return a CRC error or even lock up 1485 * completely when trying to read the last block using a 1486 * multiblock read command. 1487 */ 1488 if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && 1489 (blk_rq_pos(req) + blk_rq_sectors(req) == 1490 get_capacity(md->disk))) 1491 brq->data.blocks--; 1492 1493 /* 1494 * After a read error, we redo the request one (native) sector 1495 * at a time in order to accurately determine which 1496 * sectors can be read successfully. 1497 */ 1498 if (recovery_mode) 1499 brq->data.blocks = queue_physical_block_size(mq->queue) >> 9; 1500 1501 /* 1502 * Some controllers have HW issues while operating 1503 * in multiple I/O mode 1504 */ 1505 if (card->host->ops->multi_io_quirk) 1506 brq->data.blocks = card->host->ops->multi_io_quirk(card, 1507 (rq_data_dir(req) == READ) ? 1508 MMC_DATA_READ : MMC_DATA_WRITE, 1509 brq->data.blocks); 1510 } 1511 1512 if (do_rel_wr) { 1513 mmc_apply_rel_rw(brq, card, req); 1514 brq->data.flags |= MMC_DATA_REL_WR; 1515 } 1516 1517 /* 1518 * Data tag is used only during writing meta data to speed 1519 * up write and any subsequent read of this meta data 1520 */ 1521 do_data_tag = card->ext_csd.data_tag_unit_size && 1522 (req->cmd_flags & REQ_META) && 1523 (rq_data_dir(req) == WRITE) && 1524 ((brq->data.blocks * brq->data.blksz) >= 1525 card->ext_csd.data_tag_unit_size); 1526 1527 if (do_data_tag) 1528 brq->data.flags |= MMC_DATA_DAT_TAG; 1529 1530 mmc_set_data_timeout(&brq->data, card); 1531 1532 brq->data.sg = mqrq->sg; 1533 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); 1534 1535 /* 1536 * Adjust the sg list so it is the same size as the 1537 * request. 
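	 * For example, if max_blk_count clamped a 2048-sector read down to
	 * 1024 blocks, the loop below trims the scatterlist to the first
	 * 1024 * 512 bytes and the untransferred tail is requeued by the
	 * completion path.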
1538 */ 1539 if (brq->data.blocks != blk_rq_sectors(req)) { 1540 int i, data_size = brq->data.blocks << 9; 1541 struct scatterlist *sg; 1542 1543 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { 1544 data_size -= sg->length; 1545 if (data_size <= 0) { 1546 sg->length += data_size; 1547 i++; 1548 break; 1549 } 1550 } 1551 brq->data.sg_len = i; 1552 } 1553 1554 if (do_rel_wr_p) 1555 *do_rel_wr_p = do_rel_wr; 1556 1557 if (do_data_tag_p) 1558 *do_data_tag_p = do_data_tag; 1559 } 1560 1561 #define MMC_CQE_RETRIES 2 1562 1563 static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) 1564 { 1565 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1566 struct mmc_request *mrq = &mqrq->brq.mrq; 1567 struct request_queue *q = req->q; 1568 struct mmc_host *host = mq->card->host; 1569 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); 1570 unsigned long flags; 1571 bool put_card; 1572 int err; 1573 1574 mmc_cqe_post_req(host, mrq); 1575 1576 if (mrq->cmd && mrq->cmd->error) 1577 err = mrq->cmd->error; 1578 else if (mrq->data && mrq->data->error) 1579 err = mrq->data->error; 1580 else 1581 err = 0; 1582 1583 if (err) { 1584 if (mqrq->retries++ < MMC_CQE_RETRIES) 1585 blk_mq_requeue_request(req, true); 1586 else 1587 blk_mq_end_request(req, BLK_STS_IOERR); 1588 } else if (mrq->data) { 1589 if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered)) 1590 blk_mq_requeue_request(req, true); 1591 else 1592 __blk_mq_end_request(req, BLK_STS_OK); 1593 } else if (mq->in_recovery) { 1594 blk_mq_requeue_request(req, true); 1595 } else { 1596 blk_mq_end_request(req, BLK_STS_OK); 1597 } 1598 1599 spin_lock_irqsave(&mq->lock, flags); 1600 1601 mq->in_flight[issue_type] -= 1; 1602 1603 put_card = (mmc_tot_in_flight(mq) == 0); 1604 1605 mmc_cqe_check_busy(mq); 1606 1607 spin_unlock_irqrestore(&mq->lock, flags); 1608 1609 if (!mq->cqe_busy) 1610 blk_mq_run_hw_queues(q, true); 1611 1612 if (put_card) 1613 mmc_put_card(mq->card, &mq->ctx); 1614 } 1615 1616 void mmc_blk_cqe_recovery(struct mmc_queue *mq) 1617 { 1618 struct mmc_card *card = mq->card; 1619 struct mmc_host *host = card->host; 1620 int err; 1621 1622 pr_debug("%s: CQE recovery start\n", mmc_hostname(host)); 1623 1624 err = mmc_cqe_recovery(host); 1625 if (err) 1626 mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); 1627 mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); 1628 1629 pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); 1630 } 1631 1632 static void mmc_blk_cqe_req_done(struct mmc_request *mrq) 1633 { 1634 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, 1635 brq.mrq); 1636 struct request *req = mmc_queue_req_to_req(mqrq); 1637 struct request_queue *q = req->q; 1638 struct mmc_queue *mq = q->queuedata; 1639 1640 /* 1641 * Block layer timeouts race with completions which means the normal 1642 * completion path cannot be used during recovery. 
1643 */ 1644 if (mq->in_recovery) 1645 mmc_blk_cqe_complete_rq(mq, req); 1646 else if (likely(!blk_should_fake_timeout(req->q))) 1647 blk_mq_complete_request(req); 1648 } 1649 1650 static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) 1651 { 1652 mrq->done = mmc_blk_cqe_req_done; 1653 mrq->recovery_notifier = mmc_cqe_recovery_notifier; 1654 1655 return mmc_cqe_start_req(host, mrq); 1656 } 1657 1658 static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, 1659 struct request *req) 1660 { 1661 struct mmc_blk_request *brq = &mqrq->brq; 1662 1663 memset(brq, 0, sizeof(*brq)); 1664 1665 brq->mrq.cmd = &brq->cmd; 1666 brq->mrq.tag = req->tag; 1667 1668 return &brq->mrq; 1669 } 1670 1671 static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) 1672 { 1673 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1674 struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); 1675 1676 mrq->cmd->opcode = MMC_SWITCH; 1677 mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 1678 (EXT_CSD_FLUSH_CACHE << 16) | 1679 (1 << 8) | 1680 EXT_CSD_CMD_SET_NORMAL; 1681 mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; 1682 1683 return mmc_blk_cqe_start_req(mq->card->host, mrq); 1684 } 1685 1686 static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1687 { 1688 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1689 struct mmc_host *host = mq->card->host; 1690 int err; 1691 1692 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 1693 mqrq->brq.mrq.done = mmc_blk_hsq_req_done; 1694 mmc_pre_req(host, &mqrq->brq.mrq); 1695 1696 err = mmc_cqe_start_req(host, &mqrq->brq.mrq); 1697 if (err) 1698 mmc_post_req(host, &mqrq->brq.mrq, err); 1699 1700 return err; 1701 } 1702 1703 static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) 1704 { 1705 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1706 struct mmc_host *host = mq->card->host; 1707 1708 if (host->hsq_enabled) 1709 return mmc_blk_hsq_issue_rw_rq(mq, req); 1710 1711 mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL); 1712 1713 return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); 1714 } 1715 1716 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 1717 struct mmc_card *card, 1718 int recovery_mode, 1719 struct mmc_queue *mq) 1720 { 1721 u32 readcmd, writecmd; 1722 struct mmc_blk_request *brq = &mqrq->brq; 1723 struct request *req = mmc_queue_req_to_req(mqrq); 1724 struct mmc_blk_data *md = mq->blkdata; 1725 bool do_rel_wr, do_data_tag; 1726 1727 mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag); 1728 1729 brq->mrq.cmd = &brq->cmd; 1730 1731 brq->cmd.arg = blk_rq_pos(req); 1732 if (!mmc_card_blockaddr(card)) 1733 brq->cmd.arg <<= 9; 1734 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 1735 1736 if (brq->data.blocks > 1 || do_rel_wr) { 1737 /* SPI multiblock writes terminate using a special 1738 * token, not a STOP_TRANSMISSION request. 1739 */ 1740 if (!mmc_host_is_spi(card->host) || 1741 rq_data_dir(req) == READ) 1742 brq->mrq.stop = &brq->stop; 1743 readcmd = MMC_READ_MULTIPLE_BLOCK; 1744 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 1745 } else { 1746 brq->mrq.stop = NULL; 1747 readcmd = MMC_READ_SINGLE_BLOCK; 1748 writecmd = MMC_WRITE_BLOCK; 1749 } 1750 brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; 1751 1752 /* 1753 * Pre-defined multi-block transfers are preferable to 1754 * open ended-ones (and necessary for reliable writes). 
1755 * However, it is not sufficient to just send CMD23, 1756 * and avoid the final CMD12, as on an error condition 1757 * CMD12 (stop) needs to be sent anyway. This, coupled 1758 * with Auto-CMD23 enhancements provided by some 1759 * hosts, means that the complexity of dealing 1760 * with this is best left to the host. If CMD23 is 1761 * supported by card and host, we'll fill sbc in and let 1762 * the host deal with handling it correctly. This means 1763 * that for hosts that don't expose MMC_CAP_CMD23, no 1764 * change of behavior will be observed. 1765 * 1766 * N.B: Some MMC cards experience perf degradation. 1767 * We'll avoid using CMD23-bounded multiblock writes for 1768 * these, while retaining features like reliable writes. 1769 */ 1770 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && 1771 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || 1772 do_data_tag)) { 1773 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; 1774 brq->sbc.arg = brq->data.blocks | 1775 (do_rel_wr ? (1 << 31) : 0) | 1776 (do_data_tag ? (1 << 29) : 0); 1777 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; 1778 brq->mrq.sbc = &brq->sbc; 1779 } 1780 1781 if (mmc_card_ult_capacity(card)) { 1782 brq->cmd.ext_addr = blk_rq_pos(req) >> 32; 1783 brq->cmd.has_ext_addr = true; 1784 } 1785 } 1786 1787 #define MMC_MAX_RETRIES 5 1788 #define MMC_DATA_RETRIES 2 1789 #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) 1790 1791 static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) 1792 { 1793 struct mmc_command cmd = { 1794 .opcode = MMC_STOP_TRANSMISSION, 1795 .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, 1796 /* Some hosts wait for busy anyway, so provide a busy timeout */ 1797 .busy_timeout = timeout, 1798 }; 1799 1800 return mmc_wait_for_cmd(card->host, &cmd, 5); 1801 } 1802 1803 static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) 1804 { 1805 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1806 struct mmc_blk_request *brq = &mqrq->brq; 1807 unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); 1808 int err; 1809 1810 mmc_retune_hold_now(card->host); 1811 1812 mmc_blk_send_stop(card, timeout); 1813 1814 err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO); 1815 1816 mmc_retune_release(card->host); 1817 1818 return err; 1819 } 1820 1821 #define MMC_READ_SINGLE_RETRIES 2 1822 1823 /* Single (native) sector read during recovery */ 1824 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) 1825 { 1826 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1827 struct mmc_request *mrq = &mqrq->brq.mrq; 1828 struct mmc_card *card = mq->card; 1829 struct mmc_host *host = card->host; 1830 blk_status_t error = BLK_STS_OK; 1831 size_t bytes_per_read = queue_physical_block_size(mq->queue); 1832 1833 do { 1834 u32 status; 1835 int err; 1836 int retries = 0; 1837 1838 while (retries++ <= MMC_READ_SINGLE_RETRIES) { 1839 mmc_blk_rw_rq_prep(mqrq, card, 1, mq); 1840 1841 mmc_wait_for_req(host, mrq); 1842 1843 err = mmc_send_status(card, &status); 1844 if (err) 1845 goto error_exit; 1846 1847 if (!mmc_host_is_spi(host) && 1848 !mmc_ready_for_data(status)) { 1849 err = mmc_blk_fix_state(card, req); 1850 if (err) 1851 goto error_exit; 1852 } 1853 1854 if (!mrq->cmd->error) 1855 break; 1856 } 1857 1858 if (mrq->cmd->error || 1859 mrq->data->error || 1860 (!mmc_host_is_spi(host) && 1861 (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) 1862 error = BLK_STS_IOERR; 1863 else 1864 error = BLK_STS_OK; 1865 1866 } while 
(blk_update_request(req, error, bytes_per_read)); 1867 1868 return; 1869 1870 error_exit: 1871 mrq->data->bytes_xfered = 0; 1872 blk_update_request(req, BLK_STS_IOERR, bytes_per_read); 1873 /* Let it try the remaining request again */ 1874 if (mqrq->retries > MMC_MAX_RETRIES - 1) 1875 mqrq->retries = MMC_MAX_RETRIES - 1; 1876 } 1877 1878 static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) 1879 { 1880 return !!brq->mrq.sbc; 1881 } 1882 1883 static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) 1884 { 1885 return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; 1886 } 1887 1888 /* 1889 * Check for errors the host controller driver might not have seen such as 1890 * response mode errors or invalid card state. 1891 */ 1892 static bool mmc_blk_status_error(struct request *req, u32 status) 1893 { 1894 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1895 struct mmc_blk_request *brq = &mqrq->brq; 1896 struct mmc_queue *mq = req->q->queuedata; 1897 u32 stop_err_bits; 1898 1899 if (mmc_host_is_spi(mq->card->host)) 1900 return false; 1901 1902 stop_err_bits = mmc_blk_stop_err_bits(brq); 1903 1904 return brq->cmd.resp[0] & CMD_ERRORS || 1905 brq->stop.resp[0] & stop_err_bits || 1906 status & stop_err_bits || 1907 (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); 1908 } 1909 1910 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) 1911 { 1912 return !brq->sbc.error && !brq->cmd.error && 1913 !(brq->cmd.resp[0] & CMD_ERRORS); 1914 } 1915 1916 /* 1917 * Requests are completed by mmc_blk_mq_complete_rq() which sets simple 1918 * policy: 1919 * 1. A request that has transferred at least some data is considered 1920 * successful and will be requeued if there is remaining data to 1921 * transfer. 1922 * 2. Otherwise the number of retries is incremented and the request 1923 * will be requeued if there are remaining retries. 1924 * 3. Otherwise the request will be errored out. 1925 * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and 1926 * mqrq->retries. So there are only 4 possible actions here: 1927 * 1. do not accept the bytes_xfered value i.e. set it to zero 1928 * 2. change mqrq->retries to determine the number of retries 1929 * 3. try to reset the card 1930 * 4. read one sector at a time 1931 */ 1932 static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) 1933 { 1934 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1935 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 1936 struct mmc_blk_request *brq = &mqrq->brq; 1937 struct mmc_blk_data *md = mq->blkdata; 1938 struct mmc_card *card = mq->card; 1939 u32 status; 1940 u32 blocks; 1941 int err; 1942 1943 /* 1944 * Some errors the host driver might not have seen. Set the number of 1945 * bytes transferred to zero in that case. 1946 */ 1947 err = __mmc_send_status(card, &status, 0); 1948 if (err || mmc_blk_status_error(req, status)) 1949 brq->data.bytes_xfered = 0; 1950 1951 mmc_retune_release(card->host); 1952 1953 /* 1954 * Try again to get the status. This also provides an opportunity for 1955 * re-tuning. 1956 */ 1957 if (err) 1958 err = __mmc_send_status(card, &status, 0); 1959 1960 /* 1961 * Nothing more to do after the number of bytes transferred has been 1962 * updated and there is no card. 
1963 */ 1964 if (err && mmc_detect_card_removed(card->host)) 1965 return; 1966 1967 /* Try to get back to "tran" state */ 1968 if (!mmc_host_is_spi(mq->card->host) && 1969 (err || !mmc_ready_for_data(status))) 1970 err = mmc_blk_fix_state(mq->card, req); 1971 1972 /* 1973 * Special case for SD cards where the card might record the number of 1974 * blocks written. 1975 */ 1976 if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && 1977 rq_data_dir(req) == WRITE) { 1978 if (mmc_sd_num_wr_blocks(card, &blocks)) 1979 brq->data.bytes_xfered = 0; 1980 else 1981 brq->data.bytes_xfered = blocks << 9; 1982 } 1983 1984 /* Reset if the card is in a bad state */ 1985 if (!mmc_host_is_spi(mq->card->host) && 1986 err && mmc_blk_reset(md, card->host, type)) { 1987 pr_err("%s: recovery failed!\n", req->q->disk->disk_name); 1988 mqrq->retries = MMC_NO_RETRIES; 1989 return; 1990 } 1991 1992 /* 1993 * If anything was done, just return and if there is anything remaining 1994 * on the request it will get requeued. 1995 */ 1996 if (brq->data.bytes_xfered) 1997 return; 1998 1999 /* Reset before last retry */ 2000 if (mqrq->retries + 1 == MMC_MAX_RETRIES && 2001 mmc_blk_reset(md, card->host, type)) 2002 return; 2003 2004 /* Command errors fail fast, so use all MMC_MAX_RETRIES */ 2005 if (brq->sbc.error || brq->cmd.error) 2006 return; 2007 2008 /* Reduce the remaining retries for data errors */ 2009 if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { 2010 mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; 2011 return; 2012 } 2013 2014 if (rq_data_dir(req) == READ && brq->data.blocks > 2015 queue_physical_block_size(mq->queue) >> 9) { 2016 /* Read one (native) sector at a time */ 2017 mmc_blk_read_single(mq, req); 2018 return; 2019 } 2020 } 2021 2022 static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) 2023 { 2024 mmc_blk_eval_resp_error(brq); 2025 2026 return brq->sbc.error || brq->cmd.error || brq->stop.error || 2027 brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; 2028 } 2029 2030 static int mmc_spi_err_check(struct mmc_card *card) 2031 { 2032 u32 status = 0; 2033 int err; 2034 2035 /* 2036 * SPI does not have a TRAN state we have to wait on, instead the 2037 * card is ready again when it no longer holds the line LOW. 2038 * We still have to ensure two things here before we know the write 2039 * was successful: 2040 * 1. The card has not disconnected during busy and we actually read our 2041 * own pull-up, thinking it was still connected, so ensure it 2042 * still responds. 2043 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a 2044 * just reconnected card after being disconnected during busy. 2045 */ 2046 err = __mmc_send_status(card, &status, 0); 2047 if (err) 2048 return err; 2049 /* All R1 and R2 bits of SPI are errors in our case */ 2050 if (status) 2051 return -EIO; 2052 return 0; 2053 } 2054 2055 static int mmc_blk_busy_cb(void *cb_data, bool *busy) 2056 { 2057 struct mmc_blk_busy_data *data = cb_data; 2058 u32 status = 0; 2059 int err; 2060 2061 err = mmc_send_status(data->card, &status); 2062 if (err) 2063 return err; 2064 2065 /* Accumulate response error bits. 
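* mmc_blk_card_busy() checks the accumulated bits against
* mmc_blk_stop_err_bits() once the busy polling has completed.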
*/ 2066 data->status |= status; 2067 2068 *busy = !mmc_ready_for_data(status); 2069 return 0; 2070 } 2071 2072 static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) 2073 { 2074 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2075 struct mmc_blk_busy_data cb_data; 2076 int err; 2077 2078 if (rq_data_dir(req) == READ) 2079 return 0; 2080 2081 if (mmc_host_is_spi(card->host)) { 2082 err = mmc_spi_err_check(card); 2083 if (err) 2084 mqrq->brq.data.bytes_xfered = 0; 2085 return err; 2086 } 2087 2088 cb_data.card = card; 2089 cb_data.status = 0; 2090 err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS, 2091 &mmc_blk_busy_cb, &cb_data); 2092 2093 /* 2094 * Do not assume data transferred correctly if there are any error bits 2095 * set. 2096 */ 2097 if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) { 2098 mqrq->brq.data.bytes_xfered = 0; 2099 err = err ? err : -EIO; 2100 } 2101 2102 /* Copy the exception bit so it will be seen later on */ 2103 if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) 2104 mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; 2105 2106 return err; 2107 } 2108 2109 static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, 2110 struct request *req) 2111 { 2112 int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 2113 2114 mmc_blk_reset_success(mq->blkdata, type); 2115 } 2116 2117 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) 2118 { 2119 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2120 unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; 2121 2122 if (nr_bytes) { 2123 if (blk_update_request(req, BLK_STS_OK, nr_bytes)) 2124 blk_mq_requeue_request(req, true); 2125 else 2126 __blk_mq_end_request(req, BLK_STS_OK); 2127 } else if (!blk_rq_bytes(req)) { 2128 __blk_mq_end_request(req, BLK_STS_IOERR); 2129 } else if (mqrq->retries++ < MMC_MAX_RETRIES) { 2130 blk_mq_requeue_request(req, true); 2131 } else { 2132 if (mmc_card_removed(mq->card)) 2133 req->rq_flags |= RQF_QUIET; 2134 blk_mq_end_request(req, BLK_STS_IOERR); 2135 } 2136 } 2137 2138 static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, 2139 struct mmc_queue_req *mqrq) 2140 { 2141 return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && 2142 (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || 2143 mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); 2144 } 2145 2146 static void mmc_blk_urgent_bkops(struct mmc_queue *mq, 2147 struct mmc_queue_req *mqrq) 2148 { 2149 if (mmc_blk_urgent_bkops_needed(mq, mqrq)) 2150 mmc_run_bkops(mq->card); 2151 } 2152 2153 static void mmc_blk_hsq_req_done(struct mmc_request *mrq) 2154 { 2155 struct mmc_queue_req *mqrq = 2156 container_of(mrq, struct mmc_queue_req, brq.mrq); 2157 struct request *req = mmc_queue_req_to_req(mqrq); 2158 struct request_queue *q = req->q; 2159 struct mmc_queue *mq = q->queuedata; 2160 struct mmc_host *host = mq->card->host; 2161 unsigned long flags; 2162 2163 if (mmc_blk_rq_error(&mqrq->brq) || 2164 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2165 spin_lock_irqsave(&mq->lock, flags); 2166 mq->recovery_needed = true; 2167 mq->recovery_req = req; 2168 spin_unlock_irqrestore(&mq->lock, flags); 2169 2170 host->cqe_ops->cqe_recovery_start(host); 2171 2172 schedule_work(&mq->recovery_work); 2173 return; 2174 } 2175 2176 mmc_blk_rw_reset_success(mq, req); 2177 2178 /* 2179 * Block layer timeouts race with completions which means the normal 2180 * completion path cannot be used during recovery. 
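* When in recovery, the request is therefore completed directly via
* mmc_blk_cqe_complete_rq().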
2181 */ 2182 if (mq->in_recovery) 2183 mmc_blk_cqe_complete_rq(mq, req); 2184 else if (likely(!blk_should_fake_timeout(req->q))) 2185 blk_mq_complete_request(req); 2186 } 2187 2188 void mmc_blk_mq_complete(struct request *req) 2189 { 2190 struct mmc_queue *mq = req->q->queuedata; 2191 struct mmc_host *host = mq->card->host; 2192 2193 if (host->cqe_enabled) 2194 mmc_blk_cqe_complete_rq(mq, req); 2195 else if (likely(!blk_should_fake_timeout(req->q))) 2196 mmc_blk_mq_complete_rq(mq, req); 2197 } 2198 2199 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, 2200 struct request *req) 2201 { 2202 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2203 struct mmc_host *host = mq->card->host; 2204 2205 if (mmc_blk_rq_error(&mqrq->brq) || 2206 mmc_blk_card_busy(mq->card, req)) { 2207 mmc_blk_mq_rw_recovery(mq, req); 2208 } else { 2209 mmc_blk_rw_reset_success(mq, req); 2210 mmc_retune_release(host); 2211 } 2212 2213 mmc_blk_urgent_bkops(mq, mqrq); 2214 } 2215 2216 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) 2217 { 2218 unsigned long flags; 2219 bool put_card; 2220 2221 spin_lock_irqsave(&mq->lock, flags); 2222 2223 mq->in_flight[issue_type] -= 1; 2224 2225 put_card = (mmc_tot_in_flight(mq) == 0); 2226 2227 spin_unlock_irqrestore(&mq->lock, flags); 2228 2229 if (put_card) 2230 mmc_put_card(mq->card, &mq->ctx); 2231 } 2232 2233 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, 2234 bool can_sleep) 2235 { 2236 enum mmc_issue_type issue_type = mmc_issue_type(mq, req); 2237 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2238 struct mmc_request *mrq = &mqrq->brq.mrq; 2239 struct mmc_host *host = mq->card->host; 2240 2241 mmc_post_req(host, mrq, 0); 2242 2243 /* 2244 * Block layer timeouts race with completions which means the normal 2245 * completion path cannot be used during recovery. 
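* Outside of recovery, blk_mq_complete_request_direct() completes the
* request in the current context when sleeping is allowed, otherwise
* blk_mq_complete_request() is used.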
2246 */ 2247 if (mq->in_recovery) { 2248 mmc_blk_mq_complete_rq(mq, req); 2249 } else if (likely(!blk_should_fake_timeout(req->q))) { 2250 if (can_sleep) 2251 blk_mq_complete_request_direct(req, mmc_blk_mq_complete); 2252 else 2253 blk_mq_complete_request(req); 2254 } 2255 2256 mmc_blk_mq_dec_in_flight(mq, issue_type); 2257 } 2258 2259 void mmc_blk_mq_recovery(struct mmc_queue *mq) 2260 { 2261 struct request *req = mq->recovery_req; 2262 struct mmc_host *host = mq->card->host; 2263 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2264 2265 mq->recovery_req = NULL; 2266 mq->rw_wait = false; 2267 2268 if (mmc_blk_rq_error(&mqrq->brq)) { 2269 mmc_retune_hold_now(host); 2270 mmc_blk_mq_rw_recovery(mq, req); 2271 } 2272 2273 mmc_blk_urgent_bkops(mq, mqrq); 2274 2275 mmc_blk_mq_post_req(mq, req, true); 2276 } 2277 2278 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, 2279 struct request **prev_req) 2280 { 2281 if (mmc_host_done_complete(mq->card->host)) 2282 return; 2283 2284 mutex_lock(&mq->complete_lock); 2285 2286 if (!mq->complete_req) 2287 goto out_unlock; 2288 2289 mmc_blk_mq_poll_completion(mq, mq->complete_req); 2290 2291 if (prev_req) 2292 *prev_req = mq->complete_req; 2293 else 2294 mmc_blk_mq_post_req(mq, mq->complete_req, true); 2295 2296 mq->complete_req = NULL; 2297 2298 out_unlock: 2299 mutex_unlock(&mq->complete_lock); 2300 } 2301 2302 void mmc_blk_mq_complete_work(struct work_struct *work) 2303 { 2304 struct mmc_queue *mq = container_of(work, struct mmc_queue, 2305 complete_work); 2306 2307 mmc_blk_mq_complete_prev_req(mq, NULL); 2308 } 2309 2310 static void mmc_blk_mq_req_done(struct mmc_request *mrq) 2311 { 2312 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, 2313 brq.mrq); 2314 struct request *req = mmc_queue_req_to_req(mqrq); 2315 struct request_queue *q = req->q; 2316 struct mmc_queue *mq = q->queuedata; 2317 struct mmc_host *host = mq->card->host; 2318 unsigned long flags; 2319 2320 if (!mmc_host_done_complete(host)) { 2321 bool waiting; 2322 2323 /* 2324 * We cannot complete the request in this context, so record 2325 * that there is a request to complete, and that a following 2326 * request does not need to wait (although it does need to 2327 * complete complete_req first). 2328 */ 2329 spin_lock_irqsave(&mq->lock, flags); 2330 mq->complete_req = req; 2331 mq->rw_wait = false; 2332 waiting = mq->waiting; 2333 spin_unlock_irqrestore(&mq->lock, flags); 2334 2335 /* 2336 * If 'waiting' then the waiting task will complete this 2337 * request, otherwise queue a work to do it. Note that 2338 * complete_work may still race with the dispatch of a following 2339 * request. 
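* The 'waiting' flag is set by mmc_blk_rw_wait_cond() while a dispatching
* task is sleeping in mmc_blk_rw_wait() for the previous request.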
2340 */ 2341 if (waiting) 2342 wake_up(&mq->wait); 2343 else 2344 queue_work(mq->card->complete_wq, &mq->complete_work); 2345 2346 return; 2347 } 2348 2349 /* Take the recovery path for errors or urgent background operations */ 2350 if (mmc_blk_rq_error(&mqrq->brq) || 2351 mmc_blk_urgent_bkops_needed(mq, mqrq)) { 2352 spin_lock_irqsave(&mq->lock, flags); 2353 mq->recovery_needed = true; 2354 mq->recovery_req = req; 2355 spin_unlock_irqrestore(&mq->lock, flags); 2356 wake_up(&mq->wait); 2357 schedule_work(&mq->recovery_work); 2358 return; 2359 } 2360 2361 mmc_blk_rw_reset_success(mq, req); 2362 2363 mq->rw_wait = false; 2364 wake_up(&mq->wait); 2365 2366 /* context unknown */ 2367 mmc_blk_mq_post_req(mq, req, false); 2368 } 2369 2370 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) 2371 { 2372 unsigned long flags; 2373 bool done; 2374 2375 /* 2376 * Wait while there is another request in progress, but not if recovery 2377 * is needed. Also indicate whether there is a request waiting to start. 2378 */ 2379 spin_lock_irqsave(&mq->lock, flags); 2380 if (mq->recovery_needed) { 2381 *err = -EBUSY; 2382 done = true; 2383 } else { 2384 done = !mq->rw_wait; 2385 } 2386 mq->waiting = !done; 2387 spin_unlock_irqrestore(&mq->lock, flags); 2388 2389 return done; 2390 } 2391 2392 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) 2393 { 2394 int err = 0; 2395 2396 wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); 2397 2398 /* Always complete the previous request if there is one */ 2399 mmc_blk_mq_complete_prev_req(mq, prev_req); 2400 2401 return err; 2402 } 2403 2404 static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, 2405 struct request *req) 2406 { 2407 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2408 struct mmc_host *host = mq->card->host; 2409 struct request *prev_req = NULL; 2410 int err = 0; 2411 2412 mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq); 2413 2414 mqrq->brq.mrq.done = mmc_blk_mq_req_done; 2415 2416 mmc_pre_req(host, &mqrq->brq.mrq); 2417 2418 err = mmc_blk_rw_wait(mq, &prev_req); 2419 if (err) 2420 goto out_post_req; 2421 2422 mq->rw_wait = true; 2423 2424 err = mmc_start_request(host, &mqrq->brq.mrq); 2425 2426 if (prev_req) 2427 mmc_blk_mq_post_req(mq, prev_req, true); 2428 2429 if (err) 2430 mq->rw_wait = false; 2431 2432 /* Release re-tuning here where there is no synchronization required */ 2433 if (err || mmc_host_done_complete(host)) 2434 mmc_retune_release(host); 2435 2436 out_post_req: 2437 if (err) 2438 mmc_post_req(host, &mqrq->brq.mrq, err); 2439 2440 return err; 2441 } 2442 2443 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) 2444 { 2445 if (host->cqe_enabled) 2446 return host->cqe_ops->cqe_wait_for_idle(host); 2447 2448 return mmc_blk_rw_wait(mq, NULL); 2449 } 2450 2451 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) 2452 { 2453 struct mmc_blk_data *md = mq->blkdata; 2454 struct mmc_card *card = md->queue.card; 2455 struct mmc_host *host = card->host; 2456 int ret; 2457 2458 ret = mmc_blk_part_switch(card, md->part_type); 2459 if (ret) 2460 return MMC_REQ_FAILED_TO_START; 2461 2462 switch (mmc_issue_type(mq, req)) { 2463 case MMC_ISSUE_SYNC: 2464 ret = mmc_blk_wait_for_idle(mq, host); 2465 if (ret) 2466 return MMC_REQ_BUSY; 2467 switch (req_op(req)) { 2468 case REQ_OP_DRV_IN: 2469 case REQ_OP_DRV_OUT: 2470 mmc_blk_issue_drv_op(mq, req); 2471 break; 2472 case REQ_OP_DISCARD: 2473 mmc_blk_issue_discard_rq(mq, req); 2474 break; 2475 case REQ_OP_SECURE_ERASE: 
2476 mmc_blk_issue_secdiscard_rq(mq, req); 2477 break; 2478 case REQ_OP_WRITE_ZEROES: 2479 mmc_blk_issue_trim_rq(mq, req); 2480 break; 2481 case REQ_OP_FLUSH: 2482 mmc_blk_issue_flush(mq, req); 2483 break; 2484 default: 2485 WARN_ON_ONCE(1); 2486 return MMC_REQ_FAILED_TO_START; 2487 } 2488 return MMC_REQ_FINISHED; 2489 case MMC_ISSUE_DCMD: 2490 case MMC_ISSUE_ASYNC: 2491 switch (req_op(req)) { 2492 case REQ_OP_FLUSH: 2493 if (!mmc_cache_enabled(host)) { 2494 blk_mq_end_request(req, BLK_STS_OK); 2495 return MMC_REQ_FINISHED; 2496 } 2497 ret = mmc_blk_cqe_issue_flush(mq, req); 2498 break; 2499 case REQ_OP_WRITE: 2500 card->written_flag = true; 2501 fallthrough; 2502 case REQ_OP_READ: 2503 if (host->cqe_enabled) 2504 ret = mmc_blk_cqe_issue_rw_rq(mq, req); 2505 else 2506 ret = mmc_blk_mq_issue_rw_rq(mq, req); 2507 break; 2508 default: 2509 WARN_ON_ONCE(1); 2510 ret = -EINVAL; 2511 } 2512 if (!ret) 2513 return MMC_REQ_STARTED; 2514 return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; 2515 default: 2516 WARN_ON_ONCE(1); 2517 return MMC_REQ_FAILED_TO_START; 2518 } 2519 }
2520 2521 static inline int mmc_blk_readonly(struct mmc_card *card) 2522 { 2523 return mmc_card_readonly(card) || 2524 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 2525 } 2526
2527 /* 2528 * Search for a declared partitions node for the disk in the mmc-card related node. 2529 * 2530 * This permits supporting a partition table defined in DT for the special case 2531 * where a partition table is not written on the disk and is expected to be 2532 * passed from the running system. 2533 * 2534 * For the user disk, the "partitions" node is searched. 2535 * For the special HW disks, a "partitions-" node with the name appended is used, 2536 * following this conversion table (to adhere to JEDEC naming): 2537 * - boot0 -> partitions-boot1 2538 * - boot1 -> partitions-boot2 2539 * - gp0 -> partitions-gp1 2540 * - gp1 -> partitions-gp2 2541 * - gp2 -> partitions-gp3 2542 * - gp3 -> partitions-gp4 2543 */
2544 static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev, 2545 const char *subname) 2546 { 2547 const char *node_name = "partitions"; 2548 2549 if (subname) { 2550 mmc_dev = mmc_dev->parent; 2551 2552 /* 2553 * Check if we are allocating a boot0/1 BOOT disk. 2554 * In DT we use the JEDEC naming boot1/2. 2555 */ 2556 if (!strcmp(subname, "boot0")) 2557 node_name = "partitions-boot1"; 2558 if (!strcmp(subname, "boot1")) 2559 node_name = "partitions-boot2"; 2560 /* 2561 * Check if we are allocating a gp0/1/2/3 GP disk. 2562 * In DT we use the JEDEC naming gp1/2/3/4. 2563 */ 2564 if (!strcmp(subname, "gp0")) 2565 node_name = "partitions-gp1"; 2566 if (!strcmp(subname, "gp1")) 2567 node_name = "partitions-gp2"; 2568 if (!strcmp(subname, "gp2")) 2569 node_name = "partitions-gp3"; 2570 if (!strcmp(subname, "gp3")) 2571 node_name = "partitions-gp4"; 2572 } 2573 2574 return device_get_named_child_node(mmc_dev, node_name); 2575 } 2576
2577 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, 2578 struct device *parent, 2579 sector_t size, 2580 bool default_ro, 2581 const char *subname, 2582 int area_type, 2583 unsigned int part_type) 2584 { 2585 struct fwnode_handle *disk_fwnode; 2586 struct mmc_blk_data *md; 2587 int devidx, ret; 2588 char cap_str[10]; 2589 unsigned int features = 0; 2590 2591 devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL); 2592 if (devidx < 0) { 2593 /* 2594 * We get -ENOSPC when there is no available 2595 * devidx left. This can happen either because userspace has not yet 2596 * unmounted the partitions, which postpones mmc_blk_release() 2597 * from being called, or because the device has more partitions 2598 * than we support. 2599 */ 2600 if (devidx == -ENOSPC) 2601 dev_err(mmc_dev(card->host), 2602 "no more device IDs available\n"); 2603 2604 return ERR_PTR(devidx); 2605 } 2606
2607 md = kzalloc(sizeof(*md), GFP_KERNEL); 2608 if (!md) { 2609 ret = -ENOMEM; 2610 goto out; 2611 } 2612 2613 md->area_type = area_type; 2614 2615 /* 2616 * Set the read-only status based on the supported commands 2617 * and the write protect switch. 2618 */ 2619 md->read_only = mmc_blk_readonly(card); 2620 2621 if (mmc_host_cmd23(card->host)) { 2622 if ((mmc_card_mmc(card) && 2623 card->csd.mmca_vsn >= CSD_SPEC_VER_3) || 2624 (mmc_card_sd(card) && !mmc_card_ult_capacity(card) && 2625 card->scr.cmds & SD_SCR_CMD23_SUPPORT)) 2626 md->flags |= MMC_BLK_CMD23; 2627 } 2628 2629 if (md->flags & MMC_BLK_CMD23 && 2630 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || 2631 card->ext_csd.rel_sectors)) { 2632 md->flags |= MMC_BLK_REL_WR; 2633 features |= (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); 2634 } else if (mmc_cache_enabled(card->host)) { 2635 features |= BLK_FEAT_WRITE_CACHE; 2636 } 2637
2638 md->disk = mmc_init_queue(&md->queue, card, features); 2639 if (IS_ERR(md->disk)) { 2640 ret = PTR_ERR(md->disk); 2641 goto err_kfree; 2642 } 2643 2644 INIT_LIST_HEAD(&md->part); 2645 INIT_LIST_HEAD(&md->rpmbs); 2646 kref_init(&md->kref); 2647 2648 md->queue.blkdata = md; 2649 md->part_type = part_type; 2650 2651 md->disk->major = MMC_BLOCK_MAJOR; 2652 md->disk->minors = perdev_minors; 2653 md->disk->first_minor = devidx * perdev_minors; 2654 md->disk->fops = &mmc_bdops; 2655 md->disk->private_data = md; 2656 md->parent = parent; 2657 set_disk_ro(md->disk, md->read_only || default_ro); 2658 if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) 2659 md->disk->flags |= GENHD_FL_NO_PART; 2660
2661 /* 2662 * As discussed on lkml, GENHD_FL_REMOVABLE should: 2663 * 2664 * - be set for removable media with permanent block devices 2665 * - be unset for removable block devices with permanent media 2666 * 2667 * Since MMC block devices clearly fall under the second 2668 * case, we do not set GENHD_FL_REMOVABLE. Userspace 2669 * should use the block device creation/destruction hotplug 2670 * messages to tell when the card is present. 2671 */ 2672 2673 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 2674 "mmcblk%u%s", card->host->index, subname ? subname : ""); 2675 2676 set_capacity(md->disk, size); 2677 2678 string_get_size((u64)size, 512, STRING_UNITS_2, 2679 cap_str, sizeof(cap_str)); 2680 pr_info("%s: %s %s %s%s\n", 2681 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 2682 cap_str, md->read_only ? " (ro)" : ""); 2683
2684 /* used in ->open, must be set before add_disk: */ 2685 if (area_type == MMC_BLK_DATA_AREA_MAIN) 2686 dev_set_drvdata(&card->dev, md); 2687 disk_fwnode = mmc_blk_get_partitions_node(parent, subname); 2688 ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups, 2689 disk_fwnode); 2690 if (ret) 2691 goto err_put_disk; 2692 return md; 2693 2694 err_put_disk: 2695 put_disk(md->disk); 2696 blk_mq_free_tag_set(&md->queue.tag_set); 2697 err_kfree: 2698 kfree(md); 2699 out: 2700 ida_free(&mmc_blk_ida, devidx); 2701 return ERR_PTR(ret); 2702 } 2703
2704 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 2705 { 2706 sector_t size; 2707 2708 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 2709 /* 2710 * The EXT_CSD sector count is the number of 512 byte 2711 * sectors. 2712 */ 2713 size = card->ext_csd.sectors; 2714 } else { 2715 /* 2716 * The CSD capacity field is in units of read_blkbits. 2717 * set_capacity takes units of 512 bytes. 2718 */ 2719 size = (typeof(sector_t))card->csd.capacity 2720 << (card->csd.read_blkbits - 9); 2721 } 2722 2723 return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, 2724 MMC_BLK_DATA_AREA_MAIN, 0); 2725 } 2726
2727 static int mmc_blk_alloc_part(struct mmc_card *card, 2728 struct mmc_blk_data *md, 2729 unsigned int part_type, 2730 sector_t size, 2731 bool default_ro, 2732 const char *subname, 2733 int area_type) 2734 { 2735 struct mmc_blk_data *part_md; 2736 2737 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, 2738 subname, area_type, part_type); 2739 if (IS_ERR(part_md)) 2740 return PTR_ERR(part_md); 2741 list_add(&part_md->part, &md->part); 2742 2743 return 0; 2744 } 2745
2746 /** 2747 * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev 2748 * @filp: the character device file 2749 * @cmd: the ioctl() command 2750 * @arg: the argument from userspace 2751 * 2752 * This essentially just redirects the ioctl()s coming in to the 2753 * main block device that spawned the RPMB character device.
2754 */ 2755 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, 2756 unsigned long arg) 2757 { 2758 struct mmc_rpmb_data *rpmb = filp->private_data; 2759 int ret; 2760 2761 switch (cmd) { 2762 case MMC_IOC_CMD: 2763 ret = mmc_blk_ioctl_cmd(rpmb->md, 2764 (struct mmc_ioc_cmd __user *)arg, 2765 rpmb); 2766 break; 2767 case MMC_IOC_MULTI_CMD: 2768 ret = mmc_blk_ioctl_multi_cmd(rpmb->md, 2769 (struct mmc_ioc_multi_cmd __user *)arg, 2770 rpmb); 2771 break; 2772 default: 2773 ret = -EINVAL; 2774 break; 2775 } 2776 2777 return ret; 2778 } 2779 2780 #ifdef CONFIG_COMPAT 2781 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, 2782 unsigned long arg) 2783 { 2784 return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); 2785 } 2786 #endif 2787 2788 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) 2789 { 2790 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2791 struct mmc_rpmb_data, chrdev); 2792 2793 get_device(&rpmb->dev); 2794 filp->private_data = rpmb; 2795 2796 return nonseekable_open(inode, filp); 2797 } 2798 2799 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) 2800 { 2801 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, 2802 struct mmc_rpmb_data, chrdev); 2803 2804 put_device(&rpmb->dev); 2805 2806 return 0; 2807 } 2808 2809 static const struct file_operations mmc_rpmb_fileops = { 2810 .release = mmc_rpmb_chrdev_release, 2811 .open = mmc_rpmb_chrdev_open, 2812 .owner = THIS_MODULE, 2813 .unlocked_ioctl = mmc_rpmb_ioctl, 2814 #ifdef CONFIG_COMPAT 2815 .compat_ioctl = mmc_rpmb_ioctl_compat, 2816 #endif 2817 }; 2818 2819 static void mmc_blk_rpmb_device_release(struct device *dev) 2820 { 2821 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2822 2823 rpmb_dev_unregister(rpmb->rdev); 2824 mmc_blk_put(rpmb->md); 2825 ida_free(&mmc_rpmb_ida, rpmb->id); 2826 kfree(rpmb); 2827 } 2828 2829 static void free_idata(struct mmc_blk_ioc_data **idata, unsigned int cmd_count) 2830 { 2831 unsigned int n; 2832 2833 for (n = 0; n < cmd_count; n++) 2834 kfree(idata[n]); 2835 kfree(idata); 2836 } 2837 2838 static struct mmc_blk_ioc_data **alloc_idata(struct mmc_rpmb_data *rpmb, 2839 unsigned int cmd_count) 2840 { 2841 struct mmc_blk_ioc_data **idata; 2842 unsigned int n; 2843 2844 idata = kcalloc(cmd_count, sizeof(*idata), GFP_KERNEL); 2845 if (!idata) 2846 return NULL; 2847 2848 for (n = 0; n < cmd_count; n++) { 2849 idata[n] = kcalloc(1, sizeof(**idata), GFP_KERNEL); 2850 if (!idata[n]) { 2851 free_idata(idata, n); 2852 return NULL; 2853 } 2854 idata[n]->rpmb = rpmb; 2855 } 2856 2857 return idata; 2858 } 2859 2860 static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode, 2861 int write_flag, u8 *buf, unsigned int buf_bytes) 2862 { 2863 /* 2864 * The size of an RPMB frame must match what's expected by the 2865 * hardware. 
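* The BUILD_BUG_ON() below enforces the 512 byte frame size at compile time.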
2866 */ 2867 BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512); 2868 2869 idata->ic.opcode = opcode; 2870 idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2871 idata->ic.write_flag = write_flag; 2872 idata->ic.blksz = sizeof(struct rpmb_frame); 2873 idata->ic.blocks = buf_bytes / idata->ic.blksz; 2874 idata->buf = buf; 2875 idata->buf_bytes = buf_bytes; 2876 } 2877 2878 static int mmc_route_rpmb_frames(struct device *dev, u8 *req, 2879 unsigned int req_len, u8 *resp, 2880 unsigned int resp_len) 2881 { 2882 struct rpmb_frame *frm = (struct rpmb_frame *)req; 2883 struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); 2884 struct mmc_blk_data *md = rpmb->md; 2885 struct mmc_blk_ioc_data **idata; 2886 struct mmc_queue_req *mq_rq; 2887 unsigned int cmd_count; 2888 struct request *rq; 2889 u16 req_type; 2890 bool write; 2891 int ret; 2892 2893 if (IS_ERR(md->queue.card)) 2894 return PTR_ERR(md->queue.card); 2895 2896 if (req_len < sizeof(*frm)) 2897 return -EINVAL; 2898 2899 req_type = be16_to_cpu(frm->req_resp); 2900 switch (req_type) { 2901 case RPMB_PROGRAM_KEY: 2902 if (req_len != sizeof(struct rpmb_frame) || 2903 resp_len != sizeof(struct rpmb_frame)) 2904 return -EINVAL; 2905 write = true; 2906 break; 2907 case RPMB_GET_WRITE_COUNTER: 2908 if (req_len != sizeof(struct rpmb_frame) || 2909 resp_len != sizeof(struct rpmb_frame)) 2910 return -EINVAL; 2911 write = false; 2912 break; 2913 case RPMB_WRITE_DATA: 2914 if (req_len % sizeof(struct rpmb_frame) || 2915 resp_len != sizeof(struct rpmb_frame)) 2916 return -EINVAL; 2917 write = true; 2918 break; 2919 case RPMB_READ_DATA: 2920 if (req_len != sizeof(struct rpmb_frame) || 2921 resp_len % sizeof(struct rpmb_frame)) 2922 return -EINVAL; 2923 write = false; 2924 break; 2925 default: 2926 return -EINVAL; 2927 } 2928 2929 if (write) 2930 cmd_count = 3; 2931 else 2932 cmd_count = 2; 2933 2934 idata = alloc_idata(rpmb, cmd_count); 2935 if (!idata) 2936 return -ENOMEM; 2937 2938 if (write) { 2939 struct rpmb_frame *frm = (struct rpmb_frame *)resp; 2940 2941 /* Send write request frame(s) */ 2942 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 2943 1 | MMC_CMD23_ARG_REL_WR, req, req_len); 2944 2945 /* Send result request frame */ 2946 memset(frm, 0, sizeof(*frm)); 2947 frm->req_resp = cpu_to_be16(RPMB_RESULT_READ); 2948 set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp, 2949 resp_len); 2950 2951 /* Read response frame */ 2952 set_idata(idata[2], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); 2953 } else { 2954 /* Send write request frame(s) */ 2955 set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK, 1, req, req_len); 2956 2957 /* Read response frame */ 2958 set_idata(idata[1], MMC_READ_MULTIPLE_BLOCK, 0, resp, resp_len); 2959 } 2960 2961 rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0); 2962 if (IS_ERR(rq)) { 2963 ret = PTR_ERR(rq); 2964 goto out; 2965 } 2966 2967 mq_rq = req_to_mmc_queue_req(rq); 2968 mq_rq->drv_op = MMC_DRV_OP_IOCTL_RPMB; 2969 mq_rq->drv_op_result = -EIO; 2970 mq_rq->drv_op_data = idata; 2971 mq_rq->ioc_count = cmd_count; 2972 blk_execute_rq(rq, false); 2973 ret = req_to_mmc_queue_req(rq)->drv_op_result; 2974 2975 blk_mq_free_request(rq); 2976 2977 out: 2978 free_idata(idata, cmd_count); 2979 return ret; 2980 } 2981 2982 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, 2983 struct mmc_blk_data *md, 2984 unsigned int part_index, 2985 sector_t size, 2986 const char *subname) 2987 { 2988 int devidx, ret; 2989 char rpmb_name[DISK_NAME_LEN]; 2990 char cap_str[10]; 2991 struct mmc_rpmb_data *rpmb; 2992 2993 /* This creates the minor 
number for the RPMB char device */ 2994 devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL); 2995 if (devidx < 0) 2996 return devidx; 2997 2998 rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); 2999 if (!rpmb) { 3000 ida_free(&mmc_rpmb_ida, devidx); 3001 return -ENOMEM; 3002 } 3003 3004 snprintf(rpmb_name, sizeof(rpmb_name), 3005 "mmcblk%u%s", card->host->index, subname ? subname : ""); 3006
3007 rpmb->id = devidx; 3008 rpmb->part_index = part_index; 3009 rpmb->dev.init_name = rpmb_name; 3010 rpmb->dev.bus = &mmc_rpmb_bus_type; 3011 rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); 3012 rpmb->dev.parent = &card->dev; 3013 rpmb->dev.release = mmc_blk_rpmb_device_release; 3014 device_initialize(&rpmb->dev); 3015 dev_set_drvdata(&rpmb->dev, rpmb); 3016 mmc_blk_get(md->disk); 3017 rpmb->md = md; 3018
3019 cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); 3020 rpmb->chrdev.owner = THIS_MODULE; 3021 ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); 3022 if (ret) { 3023 pr_err("%s: could not add character device\n", rpmb_name); 3024 goto out_put_device; 3025 } 3026 3027 list_add(&rpmb->node, &md->rpmbs); 3028 3029 string_get_size((u64)size, 512, STRING_UNITS_2, 3030 cap_str, sizeof(cap_str)); 3031 3032 pr_info("%s: %s %s %s, chardev (%d:%d)\n", 3033 rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str, 3034 MAJOR(mmc_rpmb_devt), rpmb->id); 3035 3036 return 0; 3037 3038 out_put_device: 3039 put_device(&rpmb->dev); 3040 return ret; 3041 } 3042
3043 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) 3044 3045 { 3046 cdev_device_del(&rpmb->chrdev, &rpmb->dev); 3047 put_device(&rpmb->dev); 3048 } 3049
3050 /* MMC physical partitions consist of two boot partitions and 3051 * up to four general purpose partitions. 3052 * For each partition enabled in EXT_CSD a block device will be allocated 3053 * to provide access to the partition. 3054 */ 3055
3056 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) 3057 { 3058 int idx, ret; 3059 3060 if (!mmc_card_mmc(card)) 3061 return 0; 3062 3063 for (idx = 0; idx < card->nr_parts; idx++) { 3064 if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { 3065 /* 3066 * RPMB partitions do not provide block access, they 3067 * are only accessed using ioctl()s. Thus create 3068 * special RPMB block devices that do not have a 3069 * backing block queue for these. 3070 */ 3071 ret = mmc_blk_alloc_rpmb_part(card, md, 3072 card->part[idx].part_cfg, 3073 card->part[idx].size >> 9, 3074 card->part[idx].name); 3075 if (ret) 3076 return ret; 3077 } else if (card->part[idx].size) { 3078 ret = mmc_blk_alloc_part(card, md, 3079 card->part[idx].part_cfg, 3080 card->part[idx].size >> 9, 3081 card->part[idx].force_ro, 3082 card->part[idx].name, 3083 card->part[idx].area_type); 3084 if (ret) 3085 return ret; 3086 } 3087 } 3088 3089 return 0; 3090 } 3091
3092 static void mmc_blk_remove_req(struct mmc_blk_data *md) 3093 { 3094 /* 3095 * Flush remaining requests and free queues. It is freeing the queue 3096 * that stops new requests from being accepted.
3097 */ 3098 del_gendisk(md->disk); 3099 mmc_cleanup_queue(&md->queue); 3100 mmc_blk_put(md); 3101 } 3102 3103 static void mmc_blk_remove_parts(struct mmc_card *card, 3104 struct mmc_blk_data *md) 3105 { 3106 struct list_head *pos, *q; 3107 struct mmc_blk_data *part_md; 3108 struct mmc_rpmb_data *rpmb; 3109 3110 /* Remove RPMB partitions */ 3111 list_for_each_safe(pos, q, &md->rpmbs) { 3112 rpmb = list_entry(pos, struct mmc_rpmb_data, node); 3113 list_del(pos); 3114 mmc_blk_remove_rpmb_part(rpmb); 3115 } 3116 /* Remove block partitions */ 3117 list_for_each_safe(pos, q, &md->part) { 3118 part_md = list_entry(pos, struct mmc_blk_data, part); 3119 list_del(pos); 3120 mmc_blk_remove_req(part_md); 3121 } 3122 } 3123 3124 #ifdef CONFIG_DEBUG_FS 3125 3126 static int mmc_dbg_card_status_get(void *data, u64 *val) 3127 { 3128 struct mmc_card *card = data; 3129 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3130 struct mmc_queue *mq = &md->queue; 3131 struct request *req; 3132 int ret; 3133 3134 /* Ask the block layer about the card status */ 3135 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 3136 if (IS_ERR(req)) 3137 return PTR_ERR(req); 3138 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; 3139 req_to_mmc_queue_req(req)->drv_op_result = -EIO; 3140 blk_execute_rq(req, false); 3141 ret = req_to_mmc_queue_req(req)->drv_op_result; 3142 if (ret >= 0) { 3143 *val = ret; 3144 ret = 0; 3145 } 3146 blk_mq_free_request(req); 3147 3148 return ret; 3149 } 3150 DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, 3151 NULL, "%08llx\n"); 3152 3153 /* That is two digits * 512 + 1 for newline */ 3154 #define EXT_CSD_STR_LEN 1025 3155 3156 static int mmc_ext_csd_open(struct inode *inode, struct file *filp) 3157 { 3158 struct mmc_card *card = inode->i_private; 3159 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3160 struct mmc_queue *mq = &md->queue; 3161 struct request *req; 3162 char *buf; 3163 ssize_t n = 0; 3164 u8 *ext_csd; 3165 int err, i; 3166 3167 buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); 3168 if (!buf) 3169 return -ENOMEM; 3170 3171 /* Ask the block layer for the EXT CSD */ 3172 req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0); 3173 if (IS_ERR(req)) { 3174 err = PTR_ERR(req); 3175 goto out_free; 3176 } 3177 req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; 3178 req_to_mmc_queue_req(req)->drv_op_result = -EIO; 3179 req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; 3180 blk_execute_rq(req, false); 3181 err = req_to_mmc_queue_req(req)->drv_op_result; 3182 blk_mq_free_request(req); 3183 if (err) { 3184 pr_err("FAILED %d\n", err); 3185 goto out_free; 3186 } 3187 3188 for (i = 0; i < 512; i++) 3189 n += sprintf(buf + n, "%02x", ext_csd[i]); 3190 n += sprintf(buf + n, "\n"); 3191 3192 if (n != EXT_CSD_STR_LEN) { 3193 err = -EINVAL; 3194 kfree(ext_csd); 3195 goto out_free; 3196 } 3197 3198 filp->private_data = buf; 3199 kfree(ext_csd); 3200 return 0; 3201 3202 out_free: 3203 kfree(buf); 3204 return err; 3205 } 3206 3207 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, 3208 size_t cnt, loff_t *ppos) 3209 { 3210 char *buf = filp->private_data; 3211 3212 return simple_read_from_buffer(ubuf, cnt, ppos, 3213 buf, EXT_CSD_STR_LEN); 3214 } 3215 3216 static int mmc_ext_csd_release(struct inode *inode, struct file *file) 3217 { 3218 kfree(file->private_data); 3219 return 0; 3220 } 3221 3222 static const struct file_operations mmc_dbg_ext_csd_fops = { 3223 .open = mmc_ext_csd_open, 3224 .read = mmc_ext_csd_read, 
3225 .release = mmc_ext_csd_release, 3226 .llseek = default_llseek, 3227 }; 3228
3229 static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) 3230 { 3231 struct dentry *root; 3232 3233 if (!card->debugfs_root) 3234 return; 3235 3236 root = card->debugfs_root; 3237 3238 if (mmc_card_mmc(card) || mmc_card_sd(card)) { 3239 md->status_dentry = 3240 debugfs_create_file_unsafe("status", 0400, root, 3241 card, 3242 &mmc_dbg_card_status_fops); 3243 } 3244 3245 if (mmc_card_mmc(card)) { 3246 md->ext_csd_dentry = 3247 debugfs_create_file("ext_csd", S_IRUSR, root, card, 3248 &mmc_dbg_ext_csd_fops); 3249 } 3250 } 3251
3252 static void mmc_blk_remove_debugfs(struct mmc_card *card, 3253 struct mmc_blk_data *md) 3254 { 3255 if (!card->debugfs_root) 3256 return; 3257 3258 debugfs_remove(md->status_dentry); 3259 md->status_dentry = NULL; 3260 3261 debugfs_remove(md->ext_csd_dentry); 3262 md->ext_csd_dentry = NULL; 3263 } 3264
3265 #else 3266 3267 static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) 3268 { 3269 } 3270 3271 static void mmc_blk_remove_debugfs(struct mmc_card *card, 3272 struct mmc_blk_data *md) 3273 { 3274 } 3275 3276 #endif /* CONFIG_DEBUG_FS */ 3277
3278 static void mmc_blk_rpmb_add(struct mmc_card *card) 3279 { 3280 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3281 struct mmc_rpmb_data *rpmb; 3282 struct rpmb_dev *rdev; 3283 unsigned int n; 3284 u32 cid[4]; 3285 struct rpmb_descr descr = { 3286 .type = RPMB_TYPE_EMMC, 3287 .route_frames = mmc_route_rpmb_frames, 3288 .reliable_wr_count = card->ext_csd.enhanced_rpmb_supported ? 3289 2 : 32, 3290 .capacity = card->ext_csd.raw_rpmb_size_mult, 3291 .dev_id = (void *)cid, 3292 .dev_id_len = sizeof(cid), 3293 }; 3294
3295 /* 3296 * Provide the CID as an octet array. The CID needs to be interpreted 3297 * when used as input to derive the RPMB key since some fields 3298 * will change due to firmware updates. 3299 */ 3300 for (n = 0; n < 4; n++) 3301 cid[n] = be32_to_cpu((__force __be32)card->raw_cid[n]); 3302 3303 list_for_each_entry(rpmb, &md->rpmbs, node) { 3304 rdev = rpmb_dev_register(&rpmb->dev, &descr); 3305 if (IS_ERR(rdev)) { 3306 pr_warn("%s: could not register RPMB device\n", 3307 dev_name(&rpmb->dev)); 3308 continue; 3309 } 3310 rpmb->rdev = rdev; 3311 } 3312 } 3313
3314 static int mmc_blk_probe(struct mmc_card *card) 3315 { 3316 struct mmc_blk_data *md; 3317 int ret = 0; 3318 3319 /* 3320 * Check that the card supports the command class(es) we need. 3321 */ 3322 if (!(card->csd.cmdclass & CCC_BLOCK_READ)) 3323 return -ENODEV; 3324 3325 mmc_fixup_device(card, mmc_blk_fixups); 3326 3327 card->complete_wq = alloc_workqueue("mmc_complete", 3328 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 3329 if (!card->complete_wq) { 3330 pr_err("Failed to create mmc completion workqueue\n"); 3331 return -ENOMEM; 3332 } 3333 3334 md = mmc_blk_alloc(card); 3335 if (IS_ERR(md)) { 3336 ret = PTR_ERR(md); 3337 goto out_free; 3338 } 3339 3340 ret = mmc_blk_alloc_parts(card, md); 3341 if (ret) 3342 goto out; 3343 3344 /* Add two debugfs entries */ 3345 mmc_blk_add_debugfs(card, md); 3346 3347 pm_runtime_set_autosuspend_delay(&card->dev, 3000); 3348 pm_runtime_use_autosuspend(&card->dev); 3349 3350 /* 3351 * Don't enable runtime PM for SD-combo cards here. Leave that 3352 * decision to be taken during the SDIO init sequence instead.
3353 */ 3354 if (!mmc_card_sd_combo(card)) { 3355 pm_runtime_set_active(&card->dev); 3356 pm_runtime_enable(&card->dev); 3357 } 3358 3359 mmc_blk_rpmb_add(card); 3360 3361 return 0; 3362 3363 out: 3364 mmc_blk_remove_parts(card, md); 3365 mmc_blk_remove_req(md); 3366 out_free: 3367 destroy_workqueue(card->complete_wq); 3368 return ret; 3369 } 3370 3371 static void mmc_blk_remove(struct mmc_card *card) 3372 { 3373 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3374 3375 mmc_blk_remove_debugfs(card, md); 3376 mmc_blk_remove_parts(card, md); 3377 pm_runtime_get_sync(&card->dev); 3378 if (md->part_curr != md->part_type) { 3379 mmc_claim_host(card->host); 3380 mmc_blk_part_switch(card, md->part_type); 3381 mmc_release_host(card->host); 3382 } 3383 if (!mmc_card_sd_combo(card)) 3384 pm_runtime_disable(&card->dev); 3385 pm_runtime_put_noidle(&card->dev); 3386 mmc_blk_remove_req(md); 3387 destroy_workqueue(card->complete_wq); 3388 } 3389 3390 static int _mmc_blk_suspend(struct mmc_card *card) 3391 { 3392 struct mmc_blk_data *part_md; 3393 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); 3394 3395 if (md) { 3396 mmc_queue_suspend(&md->queue); 3397 list_for_each_entry(part_md, &md->part, part) { 3398 mmc_queue_suspend(&part_md->queue); 3399 } 3400 } 3401 return 0; 3402 } 3403 3404 static void mmc_blk_shutdown(struct mmc_card *card) 3405 { 3406 _mmc_blk_suspend(card); 3407 } 3408 3409 #ifdef CONFIG_PM_SLEEP 3410 static int mmc_blk_suspend(struct device *dev) 3411 { 3412 struct mmc_card *card = mmc_dev_to_card(dev); 3413 3414 return _mmc_blk_suspend(card); 3415 } 3416 3417 static int mmc_blk_resume(struct device *dev) 3418 { 3419 struct mmc_blk_data *part_md; 3420 struct mmc_blk_data *md = dev_get_drvdata(dev); 3421 3422 if (md) { 3423 /* 3424 * Resume involves the card going into idle state, 3425 * so current partition is always the main one. 
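* Reset part_curr accordingly before resuming the queues.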
3426 */ 3427 md->part_curr = md->part_type; 3428 mmc_queue_resume(&md->queue); 3429 list_for_each_entry(part_md, &md->part, part) { 3430 mmc_queue_resume(&part_md->queue); 3431 } 3432 } 3433 return 0; 3434 } 3435 #endif 3436 3437 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); 3438 3439 static struct mmc_driver mmc_driver = { 3440 .drv = { 3441 .name = "mmcblk", 3442 .pm = &mmc_blk_pm_ops, 3443 }, 3444 .probe = mmc_blk_probe, 3445 .remove = mmc_blk_remove, 3446 .shutdown = mmc_blk_shutdown, 3447 }; 3448 3449 static int __init mmc_blk_init(void) 3450 { 3451 int res; 3452 3453 res = bus_register(&mmc_rpmb_bus_type); 3454 if (res < 0) { 3455 pr_err("mmcblk: could not register RPMB bus type\n"); 3456 return res; 3457 } 3458 res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); 3459 if (res < 0) { 3460 pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); 3461 goto out_bus_unreg; 3462 } 3463 3464 if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) 3465 pr_info("mmcblk: using %d minors per device\n", perdev_minors); 3466 3467 max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); 3468 3469 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3470 if (res) 3471 goto out_chrdev_unreg; 3472 3473 res = mmc_register_driver(&mmc_driver); 3474 if (res) 3475 goto out_blkdev_unreg; 3476 3477 return 0; 3478 3479 out_blkdev_unreg: 3480 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3481 out_chrdev_unreg: 3482 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3483 out_bus_unreg: 3484 bus_unregister(&mmc_rpmb_bus_type); 3485 return res; 3486 } 3487 3488 static void __exit mmc_blk_exit(void) 3489 { 3490 mmc_unregister_driver(&mmc_driver); 3491 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); 3492 unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); 3493 bus_unregister(&mmc_rpmb_bus_type); 3494 } 3495 3496 module_init(mmc_blk_init); 3497 module_exit(mmc_blk_exit); 3498 3499 MODULE_LICENSE("GPL"); 3500 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 3501