1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Userspace block device - block device which IO is handled from userspace 4 * 5 * Take full use of io_uring passthrough command for communicating with 6 * ublk userspace daemon(ublksrvd) for handling basic IO request. 7 * 8 * Copyright 2022 Ming Lei <ming.lei@redhat.com> 9 * 10 * (part of code stolen from loop.c) 11 */ 12 #include <linux/module.h> 13 #include <linux/moduleparam.h> 14 #include <linux/sched.h> 15 #include <linux/fs.h> 16 #include <linux/pagemap.h> 17 #include <linux/file.h> 18 #include <linux/stat.h> 19 #include <linux/errno.h> 20 #include <linux/major.h> 21 #include <linux/wait.h> 22 #include <linux/blkdev.h> 23 #include <linux/init.h> 24 #include <linux/swap.h> 25 #include <linux/slab.h> 26 #include <linux/compat.h> 27 #include <linux/mutex.h> 28 #include <linux/writeback.h> 29 #include <linux/completion.h> 30 #include <linux/highmem.h> 31 #include <linux/sysfs.h> 32 #include <linux/miscdevice.h> 33 #include <linux/falloc.h> 34 #include <linux/uio.h> 35 #include <linux/ioprio.h> 36 #include <linux/sched/mm.h> 37 #include <linux/uaccess.h> 38 #include <linux/cdev.h> 39 #include <linux/io_uring/cmd.h> 40 #include <linux/blk-mq.h> 41 #include <linux/delay.h> 42 #include <linux/mm.h> 43 #include <asm/page.h> 44 #include <linux/task_work.h> 45 #include <linux/namei.h> 46 #include <linux/kref.h> 47 #include <uapi/linux/ublk_cmd.h> 48 49 #define UBLK_MINORS (1U << MINORBITS) 50 51 #define UBLK_INVALID_BUF_IDX ((u16)-1) 52 53 /* private ioctl command mirror */ 54 #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC) 55 #define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE) 56 #define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV) 57 58 #define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF) 59 #define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF) 60 61 /* All UBLK_F_* have to be included into UBLK_F_ALL */ 62 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \ 63 | UBLK_F_URING_CMD_COMP_IN_TASK \ 64 | UBLK_F_NEED_GET_DATA \ 65 | UBLK_F_USER_RECOVERY \ 66 | UBLK_F_USER_RECOVERY_REISSUE \ 67 | UBLK_F_UNPRIVILEGED_DEV \ 68 | UBLK_F_CMD_IOCTL_ENCODE \ 69 | UBLK_F_USER_COPY \ 70 | UBLK_F_ZONED \ 71 | UBLK_F_USER_RECOVERY_FAIL_IO \ 72 | UBLK_F_UPDATE_SIZE \ 73 | UBLK_F_AUTO_BUF_REG \ 74 | UBLK_F_QUIESCE \ 75 | UBLK_F_PER_IO_DAEMON \ 76 | UBLK_F_BUF_REG_OFF_DAEMON) 77 78 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \ 79 | UBLK_F_USER_RECOVERY_REISSUE \ 80 | UBLK_F_USER_RECOVERY_FAIL_IO) 81 82 /* All UBLK_PARAM_TYPE_* should be included here */ 83 #define UBLK_PARAM_TYPE_ALL \ 84 (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \ 85 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \ 86 UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT) 87 88 struct ublk_uring_cmd_pdu { 89 /* 90 * Store requests in same batch temporarily for queuing them to 91 * daemon context. 92 * 93 * It should have been stored to request payload, but we do want 94 * to avoid extra pre-allocation, and uring_cmd payload is always 95 * free for us 96 */ 97 union { 98 struct request *req; 99 struct request *req_list; 100 }; 101 102 /* 103 * The following two are valid in this cmd whole lifetime, and 104 * setup in ublk uring_cmd handler 105 */ 106 struct ublk_queue *ubq; 107 108 u16 tag; 109 }; 110 111 /* 112 * io command is active: sqe cmd is received, and its cqe isn't done 113 * 114 * If the flag is set, the io command is owned by ublk driver, and waited 115 * for incoming blk-mq request from the ublk block device. 116 * 117 * If the flag is cleared, the io command will be completed, and owned by 118 * ublk server. 119 */ 120 #define UBLK_IO_FLAG_ACTIVE 0x01 121 122 /* 123 * IO command is completed via cqe, and it is being handled by ublksrv, and 124 * not committed yet 125 * 126 * Basically exclusively with UBLK_IO_FLAG_ACTIVE, so can be served for 127 * cross verification 128 */ 129 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02 130 131 /* 132 * UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires 133 * get data buffer address from ublksrv. 134 * 135 * Then, bio data could be copied into this data buffer for a WRITE request 136 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset. 137 */ 138 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08 139 140 /* 141 * request buffer is registered automatically, so we have to unregister it 142 * before completing this request. 143 * 144 * io_uring will unregister buffer automatically for us during exiting. 145 */ 146 #define UBLK_IO_FLAG_AUTO_BUF_REG 0x10 147 148 /* atomic RW with ubq->cancel_lock */ 149 #define UBLK_IO_FLAG_CANCELED 0x80000000 150 151 /* 152 * Initialize refcount to a large number to include any registered buffers. 153 * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for 154 * any buffers registered on the io daemon task. 155 */ 156 #define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2) 157 158 union ublk_io_buf { 159 __u64 addr; 160 struct ublk_auto_buf_reg auto_reg; 161 }; 162 163 struct ublk_io { 164 union ublk_io_buf buf; 165 unsigned int flags; 166 int res; 167 168 union { 169 /* valid if UBLK_IO_FLAG_ACTIVE is set */ 170 struct io_uring_cmd *cmd; 171 /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */ 172 struct request *req; 173 }; 174 175 struct task_struct *task; 176 177 /* 178 * The number of uses of this I/O by the ublk server 179 * if user copy or zero copy are enabled: 180 * - UBLK_REFCOUNT_INIT from dispatch to the server 181 * until UBLK_IO_COMMIT_AND_FETCH_REQ 182 * - 1 for each inflight ublk_ch_{read,write}_iter() call 183 * - 1 for each io_uring registered buffer not registered on task 184 * The I/O can only be completed once all references are dropped. 185 * User copy and buffer registration operations are only permitted 186 * if the reference count is nonzero. 187 */ 188 refcount_t ref; 189 /* Count of buffers registered on task and not yet unregistered */ 190 unsigned task_registered_buffers; 191 192 void *buf_ctx_handle; 193 } ____cacheline_aligned_in_smp; 194 195 struct ublk_queue { 196 int q_id; 197 int q_depth; 198 199 unsigned long flags; 200 struct ublksrv_io_desc *io_cmd_buf; 201 202 bool force_abort; 203 bool canceling; 204 bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */ 205 spinlock_t cancel_lock; 206 struct ublk_device *dev; 207 struct ublk_io ios[] __counted_by(q_depth); 208 }; 209 210 struct ublk_device { 211 struct gendisk *ub_disk; 212 213 struct ublksrv_ctrl_dev_info dev_info; 214 215 struct blk_mq_tag_set tag_set; 216 217 struct cdev cdev; 218 struct device cdev_dev; 219 220 #define UB_STATE_OPEN 0 221 #define UB_STATE_USED 1 222 #define UB_STATE_DELETED 2 223 unsigned long state; 224 int ub_number; 225 226 struct mutex mutex; 227 228 spinlock_t lock; 229 struct mm_struct *mm; 230 231 struct ublk_params params; 232 233 struct completion completion; 234 u32 nr_io_ready; 235 bool unprivileged_daemons; 236 struct mutex cancel_mutex; 237 bool canceling; 238 pid_t ublksrv_tgid; 239 struct delayed_work exit_work; 240 241 struct ublk_queue *queues[]; 242 }; 243 244 /* header of ublk_params */ 245 struct ublk_params_header { 246 __u32 len; 247 __u32 types; 248 }; 249 250 static void ublk_io_release(void *priv); 251 static void ublk_stop_dev_unlocked(struct ublk_device *ub); 252 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); 253 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, 254 u16 q_id, u16 tag, struct ublk_io *io, size_t offset); 255 static inline unsigned int ublk_req_build_flags(struct request *req); 256 257 static inline struct ublksrv_io_desc * 258 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag) 259 { 260 return &ubq->io_cmd_buf[tag]; 261 } 262 263 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) 264 { 265 return ub->dev_info.flags & UBLK_F_ZONED; 266 } 267 268 static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq) 269 { 270 return ubq->flags & UBLK_F_ZONED; 271 } 272 273 #ifdef CONFIG_BLK_DEV_ZONED 274 275 struct ublk_zoned_report_desc { 276 __u64 sector; 277 __u32 operation; 278 __u32 nr_zones; 279 }; 280 281 static DEFINE_XARRAY(ublk_zoned_report_descs); 282 283 static int ublk_zoned_insert_report_desc(const struct request *req, 284 struct ublk_zoned_report_desc *desc) 285 { 286 return xa_insert(&ublk_zoned_report_descs, (unsigned long)req, 287 desc, GFP_KERNEL); 288 } 289 290 static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc( 291 const struct request *req) 292 { 293 return xa_erase(&ublk_zoned_report_descs, (unsigned long)req); 294 } 295 296 static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc( 297 const struct request *req) 298 { 299 return xa_load(&ublk_zoned_report_descs, (unsigned long)req); 300 } 301 302 static int ublk_get_nr_zones(const struct ublk_device *ub) 303 { 304 const struct ublk_param_basic *p = &ub->params.basic; 305 306 /* Zone size is a power of 2 */ 307 return p->dev_sectors >> ilog2(p->chunk_sectors); 308 } 309 310 static int ublk_revalidate_disk_zones(struct ublk_device *ub) 311 { 312 return blk_revalidate_disk_zones(ub->ub_disk); 313 } 314 315 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) 316 { 317 const struct ublk_param_zoned *p = &ub->params.zoned; 318 int nr_zones; 319 320 if (!ublk_dev_is_zoned(ub)) 321 return -EINVAL; 322 323 if (!p->max_zone_append_sectors) 324 return -EINVAL; 325 326 nr_zones = ublk_get_nr_zones(ub); 327 328 if (p->max_active_zones > nr_zones) 329 return -EINVAL; 330 331 if (p->max_open_zones > nr_zones) 332 return -EINVAL; 333 334 return 0; 335 } 336 337 static void ublk_dev_param_zoned_apply(struct ublk_device *ub) 338 { 339 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); 340 } 341 342 /* Based on virtblk_alloc_report_buffer */ 343 static void *ublk_alloc_report_buffer(struct ublk_device *ublk, 344 unsigned int nr_zones, size_t *buflen) 345 { 346 struct request_queue *q = ublk->ub_disk->queue; 347 size_t bufsize; 348 void *buf; 349 350 nr_zones = min_t(unsigned int, nr_zones, 351 ublk->ub_disk->nr_zones); 352 353 bufsize = nr_zones * sizeof(struct blk_zone); 354 bufsize = 355 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); 356 357 while (bufsize >= sizeof(struct blk_zone)) { 358 buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); 359 if (buf) { 360 *buflen = bufsize; 361 return buf; 362 } 363 bufsize >>= 1; 364 } 365 366 *buflen = 0; 367 return NULL; 368 } 369 370 static int ublk_report_zones(struct gendisk *disk, sector_t sector, 371 unsigned int nr_zones, struct blk_report_zones_args *args) 372 { 373 struct ublk_device *ub = disk->private_data; 374 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors; 375 unsigned int first_zone = sector >> ilog2(zone_size_sectors); 376 unsigned int done_zones = 0; 377 unsigned int max_zones_per_request; 378 int ret; 379 struct blk_zone *buffer; 380 size_t buffer_length; 381 382 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone, 383 nr_zones); 384 385 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length); 386 if (!buffer) 387 return -ENOMEM; 388 389 max_zones_per_request = buffer_length / sizeof(struct blk_zone); 390 391 while (done_zones < nr_zones) { 392 unsigned int remaining_zones = nr_zones - done_zones; 393 unsigned int zones_in_request = 394 min_t(unsigned int, remaining_zones, max_zones_per_request); 395 struct request *req; 396 struct ublk_zoned_report_desc desc; 397 blk_status_t status; 398 399 memset(buffer, 0, buffer_length); 400 401 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0); 402 if (IS_ERR(req)) { 403 ret = PTR_ERR(req); 404 goto out; 405 } 406 407 desc.operation = UBLK_IO_OP_REPORT_ZONES; 408 desc.sector = sector; 409 desc.nr_zones = zones_in_request; 410 ret = ublk_zoned_insert_report_desc(req, &desc); 411 if (ret) 412 goto free_req; 413 414 ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL); 415 if (ret) 416 goto erase_desc; 417 418 status = blk_execute_rq(req, 0); 419 ret = blk_status_to_errno(status); 420 erase_desc: 421 ublk_zoned_erase_report_desc(req); 422 free_req: 423 blk_mq_free_request(req); 424 if (ret) 425 goto out; 426 427 for (unsigned int i = 0; i < zones_in_request; i++) { 428 struct blk_zone *zone = buffer + i; 429 430 /* A zero length zone means no more zones in this response */ 431 if (!zone->len) 432 break; 433 434 ret = disk_report_zone(disk, zone, i, args); 435 if (ret) 436 goto out; 437 438 done_zones++; 439 sector += zone_size_sectors; 440 441 } 442 } 443 444 ret = done_zones; 445 446 out: 447 kvfree(buffer); 448 return ret; 449 } 450 451 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, 452 struct request *req) 453 { 454 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); 455 struct ublk_io *io = &ubq->ios[req->tag]; 456 struct ublk_zoned_report_desc *desc; 457 u32 ublk_op; 458 459 switch (req_op(req)) { 460 case REQ_OP_ZONE_OPEN: 461 ublk_op = UBLK_IO_OP_ZONE_OPEN; 462 break; 463 case REQ_OP_ZONE_CLOSE: 464 ublk_op = UBLK_IO_OP_ZONE_CLOSE; 465 break; 466 case REQ_OP_ZONE_FINISH: 467 ublk_op = UBLK_IO_OP_ZONE_FINISH; 468 break; 469 case REQ_OP_ZONE_RESET: 470 ublk_op = UBLK_IO_OP_ZONE_RESET; 471 break; 472 case REQ_OP_ZONE_APPEND: 473 ublk_op = UBLK_IO_OP_ZONE_APPEND; 474 break; 475 case REQ_OP_ZONE_RESET_ALL: 476 ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; 477 break; 478 case REQ_OP_DRV_IN: 479 desc = ublk_zoned_get_report_desc(req); 480 if (!desc) 481 return BLK_STS_IOERR; 482 ublk_op = desc->operation; 483 switch (ublk_op) { 484 case UBLK_IO_OP_REPORT_ZONES: 485 iod->op_flags = ublk_op | ublk_req_build_flags(req); 486 iod->nr_zones = desc->nr_zones; 487 iod->start_sector = desc->sector; 488 return BLK_STS_OK; 489 default: 490 return BLK_STS_IOERR; 491 } 492 case REQ_OP_DRV_OUT: 493 /* We do not support drv_out */ 494 return BLK_STS_NOTSUPP; 495 default: 496 return BLK_STS_IOERR; 497 } 498 499 iod->op_flags = ublk_op | ublk_req_build_flags(req); 500 iod->nr_sectors = blk_rq_sectors(req); 501 iod->start_sector = blk_rq_pos(req); 502 iod->addr = io->buf.addr; 503 504 return BLK_STS_OK; 505 } 506 507 #else 508 509 #define ublk_report_zones (NULL) 510 511 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) 512 { 513 return -EOPNOTSUPP; 514 } 515 516 static void ublk_dev_param_zoned_apply(struct ublk_device *ub) 517 { 518 } 519 520 static int ublk_revalidate_disk_zones(struct ublk_device *ub) 521 { 522 return 0; 523 } 524 525 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, 526 struct request *req) 527 { 528 return BLK_STS_NOTSUPP; 529 } 530 531 #endif 532 533 static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io, 534 bool need_map); 535 536 static dev_t ublk_chr_devt; 537 static const struct class ublk_chr_class = { 538 .name = "ublk-char", 539 }; 540 541 static DEFINE_IDR(ublk_index_idr); 542 static DEFINE_SPINLOCK(ublk_idr_lock); 543 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */ 544 545 static DEFINE_MUTEX(ublk_ctl_mutex); 546 547 548 #define UBLK_MAX_UBLKS UBLK_MINORS 549 550 /* 551 * Max unprivileged ublk devices allowed to add 552 * 553 * It can be extended to one per-user limit in future or even controlled 554 * by cgroup. 555 */ 556 static unsigned int unprivileged_ublks_max = 64; 557 static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */ 558 559 static struct miscdevice ublk_misc; 560 561 static inline unsigned ublk_pos_to_hwq(loff_t pos) 562 { 563 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) & 564 UBLK_QID_BITS_MASK; 565 } 566 567 static inline unsigned ublk_pos_to_buf_off(loff_t pos) 568 { 569 return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK; 570 } 571 572 static inline unsigned ublk_pos_to_tag(loff_t pos) 573 { 574 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) & 575 UBLK_TAG_BITS_MASK; 576 } 577 578 static void ublk_dev_param_basic_apply(struct ublk_device *ub) 579 { 580 const struct ublk_param_basic *p = &ub->params.basic; 581 582 if (p->attrs & UBLK_ATTR_READ_ONLY) 583 set_disk_ro(ub->ub_disk, true); 584 585 set_capacity(ub->ub_disk, p->dev_sectors); 586 } 587 588 static int ublk_validate_params(const struct ublk_device *ub) 589 { 590 /* basic param is the only one which must be set */ 591 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) { 592 const struct ublk_param_basic *p = &ub->params.basic; 593 594 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9) 595 return -EINVAL; 596 597 if (p->logical_bs_shift > p->physical_bs_shift) 598 return -EINVAL; 599 600 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9)) 601 return -EINVAL; 602 603 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors) 604 return -EINVAL; 605 } else 606 return -EINVAL; 607 608 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) { 609 const struct ublk_param_discard *p = &ub->params.discard; 610 611 /* So far, only support single segment discard */ 612 if (p->max_discard_sectors && p->max_discard_segments != 1) 613 return -EINVAL; 614 615 if (!p->discard_granularity) 616 return -EINVAL; 617 } 618 619 /* dev_t is read-only */ 620 if (ub->params.types & UBLK_PARAM_TYPE_DEVT) 621 return -EINVAL; 622 623 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) 624 return ublk_dev_param_zoned_validate(ub); 625 else if (ublk_dev_is_zoned(ub)) 626 return -EINVAL; 627 628 if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) { 629 const struct ublk_param_dma_align *p = &ub->params.dma; 630 631 if (p->alignment >= PAGE_SIZE) 632 return -EINVAL; 633 634 if (!is_power_of_2(p->alignment + 1)) 635 return -EINVAL; 636 } 637 638 if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) { 639 const struct ublk_param_segment *p = &ub->params.seg; 640 641 if (!is_power_of_2(p->seg_boundary_mask + 1)) 642 return -EINVAL; 643 644 if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE) 645 return -EINVAL; 646 if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE) 647 return -EINVAL; 648 } 649 650 return 0; 651 } 652 653 static void ublk_apply_params(struct ublk_device *ub) 654 { 655 ublk_dev_param_basic_apply(ub); 656 657 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) 658 ublk_dev_param_zoned_apply(ub); 659 } 660 661 static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq) 662 { 663 return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY; 664 } 665 666 static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub) 667 { 668 return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY; 669 } 670 671 static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq) 672 { 673 return ubq->flags & UBLK_F_AUTO_BUF_REG; 674 } 675 676 static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub) 677 { 678 return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG; 679 } 680 681 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) 682 { 683 return ubq->flags & UBLK_F_USER_COPY; 684 } 685 686 static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub) 687 { 688 return ub->dev_info.flags & UBLK_F_USER_COPY; 689 } 690 691 static inline bool ublk_need_map_io(const struct ublk_queue *ubq) 692 { 693 return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) && 694 !ublk_support_auto_buf_reg(ubq); 695 } 696 697 static inline bool ublk_dev_need_map_io(const struct ublk_device *ub) 698 { 699 return !ublk_dev_support_user_copy(ub) && 700 !ublk_dev_support_zero_copy(ub) && 701 !ublk_dev_support_auto_buf_reg(ub); 702 } 703 704 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) 705 { 706 /* 707 * read()/write() is involved in user copy, so request reference 708 * has to be grabbed 709 * 710 * for zero copy, request buffer need to be registered to io_uring 711 * buffer table, so reference is needed 712 * 713 * For auto buffer register, ublk server still may issue 714 * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up, 715 * so reference is required too. 716 */ 717 return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) || 718 ublk_support_auto_buf_reg(ubq); 719 } 720 721 static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub) 722 { 723 return ublk_dev_support_user_copy(ub) || 724 ublk_dev_support_zero_copy(ub) || 725 ublk_dev_support_auto_buf_reg(ub); 726 } 727 728 static inline void ublk_init_req_ref(const struct ublk_queue *ubq, 729 struct ublk_io *io) 730 { 731 if (ublk_need_req_ref(ubq)) 732 refcount_set(&io->ref, UBLK_REFCOUNT_INIT); 733 } 734 735 static inline bool ublk_get_req_ref(struct ublk_io *io) 736 { 737 return refcount_inc_not_zero(&io->ref); 738 } 739 740 static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req) 741 { 742 if (!refcount_dec_and_test(&io->ref)) 743 return; 744 745 /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */ 746 __ublk_complete_rq(req, io, false); 747 } 748 749 static inline bool ublk_sub_req_ref(struct ublk_io *io) 750 { 751 unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers; 752 753 io->task_registered_buffers = 0; 754 return refcount_sub_and_test(sub_refs, &io->ref); 755 } 756 757 static inline bool ublk_need_get_data(const struct ublk_queue *ubq) 758 { 759 return ubq->flags & UBLK_F_NEED_GET_DATA; 760 } 761 762 static inline bool ublk_dev_need_get_data(const struct ublk_device *ub) 763 { 764 return ub->dev_info.flags & UBLK_F_NEED_GET_DATA; 765 } 766 767 /* Called in slow path only, keep it noinline for trace purpose */ 768 static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub) 769 { 770 if (kobject_get_unless_zero(&ub->cdev_dev.kobj)) 771 return ub; 772 return NULL; 773 } 774 775 /* Called in slow path only, keep it noinline for trace purpose */ 776 static noinline void ublk_put_device(struct ublk_device *ub) 777 { 778 put_device(&ub->cdev_dev); 779 } 780 781 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev, 782 int qid) 783 { 784 return dev->queues[qid]; 785 } 786 787 static inline bool ublk_rq_has_data(const struct request *rq) 788 { 789 return bio_has_data(rq->bio); 790 } 791 792 static inline struct ublksrv_io_desc * 793 ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) 794 { 795 return ublk_get_queue(ub, q_id)->io_cmd_buf; 796 } 797 798 static inline int __ublk_queue_cmd_buf_size(int depth) 799 { 800 return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE); 801 } 802 803 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub) 804 { 805 return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth); 806 } 807 808 static int ublk_max_cmd_buf_size(void) 809 { 810 return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH); 811 } 812 813 /* 814 * Should I/O outstanding to the ublk server when it exits be reissued? 815 * If not, outstanding I/O will get errors. 816 */ 817 static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub) 818 { 819 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) && 820 (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE); 821 } 822 823 /* 824 * Should I/O issued while there is no ublk server queue? If not, I/O 825 * issued while there is no ublk server will get errors. 826 */ 827 static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub) 828 { 829 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) && 830 !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO); 831 } 832 833 /* 834 * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy 835 * of the device flags for smaller cache footprint - better for fast 836 * paths. 837 */ 838 static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq) 839 { 840 return (ubq->flags & UBLK_F_USER_RECOVERY) && 841 !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO); 842 } 843 844 /* 845 * Should ublk devices be stopped (i.e. no recovery possible) when the 846 * ublk server exits? If not, devices can be used again by a future 847 * incarnation of a ublk server via the start_recovery/end_recovery 848 * commands. 849 */ 850 static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub) 851 { 852 return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY); 853 } 854 855 static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub) 856 { 857 return ub->dev_info.state == UBLK_S_DEV_QUIESCED || 858 ub->dev_info.state == UBLK_S_DEV_FAIL_IO; 859 } 860 861 static void ublk_free_disk(struct gendisk *disk) 862 { 863 struct ublk_device *ub = disk->private_data; 864 865 clear_bit(UB_STATE_USED, &ub->state); 866 ublk_put_device(ub); 867 } 868 869 static void ublk_store_owner_uid_gid(unsigned int *owner_uid, 870 unsigned int *owner_gid) 871 { 872 kuid_t uid; 873 kgid_t gid; 874 875 current_uid_gid(&uid, &gid); 876 877 *owner_uid = from_kuid(&init_user_ns, uid); 878 *owner_gid = from_kgid(&init_user_ns, gid); 879 } 880 881 static int ublk_open(struct gendisk *disk, blk_mode_t mode) 882 { 883 struct ublk_device *ub = disk->private_data; 884 885 if (capable(CAP_SYS_ADMIN)) 886 return 0; 887 888 /* 889 * If it is one unprivileged device, only owner can open 890 * the disk. Otherwise it could be one trap made by one 891 * evil user who grants this disk's privileges to other 892 * users deliberately. 893 * 894 * This way is reasonable too given anyone can create 895 * unprivileged device, and no need other's grant. 896 */ 897 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) { 898 unsigned int curr_uid, curr_gid; 899 900 ublk_store_owner_uid_gid(&curr_uid, &curr_gid); 901 902 if (curr_uid != ub->dev_info.owner_uid || curr_gid != 903 ub->dev_info.owner_gid) 904 return -EPERM; 905 } 906 907 return 0; 908 } 909 910 static const struct block_device_operations ub_fops = { 911 .owner = THIS_MODULE, 912 .open = ublk_open, 913 .free_disk = ublk_free_disk, 914 .report_zones = ublk_report_zones, 915 }; 916 917 /* 918 * Copy data between request pages and io_iter, and 'offset' 919 * is the start point of linear offset of request. 920 */ 921 static size_t ublk_copy_user_pages(const struct request *req, 922 unsigned offset, struct iov_iter *uiter, int dir) 923 { 924 struct req_iterator iter; 925 struct bio_vec bv; 926 size_t done = 0; 927 928 rq_for_each_segment(bv, req, iter) { 929 unsigned len; 930 void *bv_buf; 931 size_t copied; 932 933 if (offset >= bv.bv_len) { 934 offset -= bv.bv_len; 935 continue; 936 } 937 938 len = bv.bv_len - offset; 939 bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset; 940 if (dir == ITER_DEST) 941 copied = copy_to_iter(bv_buf, len, uiter); 942 else 943 copied = copy_from_iter(bv_buf, len, uiter); 944 945 kunmap_local(bv_buf); 946 947 done += copied; 948 if (copied < len) 949 break; 950 951 offset = 0; 952 } 953 return done; 954 } 955 956 static inline bool ublk_need_map_req(const struct request *req) 957 { 958 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE; 959 } 960 961 static inline bool ublk_need_unmap_req(const struct request *req) 962 { 963 return ublk_rq_has_data(req) && 964 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN); 965 } 966 967 static unsigned int ublk_map_io(const struct ublk_queue *ubq, 968 const struct request *req, 969 const struct ublk_io *io) 970 { 971 const unsigned int rq_bytes = blk_rq_bytes(req); 972 973 if (!ublk_need_map_io(ubq)) 974 return rq_bytes; 975 976 /* 977 * no zero copy, we delay copy WRITE request data into ublksrv 978 * context and the big benefit is that pinning pages in current 979 * context is pretty fast, see ublk_pin_user_pages 980 */ 981 if (ublk_need_map_req(req)) { 982 struct iov_iter iter; 983 const int dir = ITER_DEST; 984 985 import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter); 986 return ublk_copy_user_pages(req, 0, &iter, dir); 987 } 988 return rq_bytes; 989 } 990 991 static unsigned int ublk_unmap_io(bool need_map, 992 const struct request *req, 993 const struct ublk_io *io) 994 { 995 const unsigned int rq_bytes = blk_rq_bytes(req); 996 997 if (!need_map) 998 return rq_bytes; 999 1000 if (ublk_need_unmap_req(req)) { 1001 struct iov_iter iter; 1002 const int dir = ITER_SOURCE; 1003 1004 WARN_ON_ONCE(io->res > rq_bytes); 1005 1006 import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter); 1007 return ublk_copy_user_pages(req, 0, &iter, dir); 1008 } 1009 return rq_bytes; 1010 } 1011 1012 static inline unsigned int ublk_req_build_flags(struct request *req) 1013 { 1014 unsigned flags = 0; 1015 1016 if (req->cmd_flags & REQ_FAILFAST_DEV) 1017 flags |= UBLK_IO_F_FAILFAST_DEV; 1018 1019 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT) 1020 flags |= UBLK_IO_F_FAILFAST_TRANSPORT; 1021 1022 if (req->cmd_flags & REQ_FAILFAST_DRIVER) 1023 flags |= UBLK_IO_F_FAILFAST_DRIVER; 1024 1025 if (req->cmd_flags & REQ_META) 1026 flags |= UBLK_IO_F_META; 1027 1028 if (req->cmd_flags & REQ_FUA) 1029 flags |= UBLK_IO_F_FUA; 1030 1031 if (req->cmd_flags & REQ_NOUNMAP) 1032 flags |= UBLK_IO_F_NOUNMAP; 1033 1034 if (req->cmd_flags & REQ_SWAP) 1035 flags |= UBLK_IO_F_SWAP; 1036 1037 return flags; 1038 } 1039 1040 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req) 1041 { 1042 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); 1043 struct ublk_io *io = &ubq->ios[req->tag]; 1044 u32 ublk_op; 1045 1046 switch (req_op(req)) { 1047 case REQ_OP_READ: 1048 ublk_op = UBLK_IO_OP_READ; 1049 break; 1050 case REQ_OP_WRITE: 1051 ublk_op = UBLK_IO_OP_WRITE; 1052 break; 1053 case REQ_OP_FLUSH: 1054 ublk_op = UBLK_IO_OP_FLUSH; 1055 break; 1056 case REQ_OP_DISCARD: 1057 ublk_op = UBLK_IO_OP_DISCARD; 1058 break; 1059 case REQ_OP_WRITE_ZEROES: 1060 ublk_op = UBLK_IO_OP_WRITE_ZEROES; 1061 break; 1062 default: 1063 if (ublk_queue_is_zoned(ubq)) 1064 return ublk_setup_iod_zoned(ubq, req); 1065 return BLK_STS_IOERR; 1066 } 1067 1068 /* need to translate since kernel may change */ 1069 iod->op_flags = ublk_op | ublk_req_build_flags(req); 1070 iod->nr_sectors = blk_rq_sectors(req); 1071 iod->start_sector = blk_rq_pos(req); 1072 iod->addr = io->buf.addr; 1073 1074 return BLK_STS_OK; 1075 } 1076 1077 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu( 1078 struct io_uring_cmd *ioucmd) 1079 { 1080 return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu); 1081 } 1082 1083 static void ublk_end_request(struct request *req, blk_status_t error) 1084 { 1085 local_bh_disable(); 1086 blk_mq_end_request(req, error); 1087 local_bh_enable(); 1088 } 1089 1090 /* todo: handle partial completion */ 1091 static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io, 1092 bool need_map) 1093 { 1094 unsigned int unmapped_bytes; 1095 blk_status_t res = BLK_STS_OK; 1096 bool requeue; 1097 1098 /* failed read IO if nothing is read */ 1099 if (!io->res && req_op(req) == REQ_OP_READ) 1100 io->res = -EIO; 1101 1102 if (io->res < 0) { 1103 res = errno_to_blk_status(io->res); 1104 goto exit; 1105 } 1106 1107 /* 1108 * FLUSH, DISCARD or WRITE_ZEROES usually won't return bytes returned, so end them 1109 * directly. 1110 * 1111 * Both the two needn't unmap. 1112 */ 1113 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE && 1114 req_op(req) != REQ_OP_DRV_IN) 1115 goto exit; 1116 1117 /* for READ request, writing data in iod->addr to rq buffers */ 1118 unmapped_bytes = ublk_unmap_io(need_map, req, io); 1119 1120 /* 1121 * Extremely impossible since we got data filled in just before 1122 * 1123 * Re-read simply for this unlikely case. 1124 */ 1125 if (unlikely(unmapped_bytes < io->res)) 1126 io->res = unmapped_bytes; 1127 1128 /* 1129 * Run bio->bi_end_io() with softirqs disabled. If the final fput 1130 * happens off this path, then that will prevent ublk's blkdev_release() 1131 * from being called on current's task work, see fput() implementation. 1132 * 1133 * Otherwise, ublk server may not provide forward progress in case of 1134 * reading the partition table from bdev_open() with disk->open_mutex 1135 * held, and causes dead lock as we could already be holding 1136 * disk->open_mutex here. 1137 * 1138 * Preferably we would not be doing IO with a mutex held that is also 1139 * used for release, but this work-around will suffice for now. 1140 */ 1141 local_bh_disable(); 1142 requeue = blk_update_request(req, BLK_STS_OK, io->res); 1143 local_bh_enable(); 1144 if (requeue) 1145 blk_mq_requeue_request(req, true); 1146 else if (likely(!blk_should_fake_timeout(req->q))) 1147 __blk_mq_end_request(req, BLK_STS_OK); 1148 1149 return; 1150 exit: 1151 ublk_end_request(req, res); 1152 } 1153 1154 static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io, 1155 struct request *req) 1156 { 1157 /* read cmd first because req will overwrite it */ 1158 struct io_uring_cmd *cmd = io->cmd; 1159 1160 /* mark this cmd owned by ublksrv */ 1161 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV; 1162 1163 /* 1164 * clear ACTIVE since we are done with this sqe/cmd slot 1165 * We can only accept io cmd in case of being not active. 1166 */ 1167 io->flags &= ~UBLK_IO_FLAG_ACTIVE; 1168 1169 io->req = req; 1170 return cmd; 1171 } 1172 1173 static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req, 1174 int res, unsigned issue_flags) 1175 { 1176 struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req); 1177 1178 /* tell ublksrv one io request is coming */ 1179 io_uring_cmd_done(cmd, res, issue_flags); 1180 } 1181 1182 #define UBLK_REQUEUE_DELAY_MS 3 1183 1184 static inline void __ublk_abort_rq(struct ublk_queue *ubq, 1185 struct request *rq) 1186 { 1187 /* We cannot process this rq so just requeue it. */ 1188 if (ublk_nosrv_dev_should_queue_io(ubq->dev)) 1189 blk_mq_requeue_request(rq, false); 1190 else 1191 ublk_end_request(rq, BLK_STS_IOERR); 1192 } 1193 1194 static void 1195 ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag) 1196 { 1197 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag); 1198 1199 iod->op_flags |= UBLK_IO_F_NEED_REG_BUF; 1200 } 1201 1202 enum auto_buf_reg_res { 1203 AUTO_BUF_REG_FAIL, 1204 AUTO_BUF_REG_FALLBACK, 1205 AUTO_BUF_REG_OK, 1206 }; 1207 1208 static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq, 1209 struct request *req, struct ublk_io *io, 1210 struct io_uring_cmd *cmd, 1211 enum auto_buf_reg_res res) 1212 { 1213 if (res == AUTO_BUF_REG_OK) { 1214 io->task_registered_buffers = 1; 1215 io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd); 1216 io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG; 1217 } 1218 ublk_init_req_ref(ubq, io); 1219 __ublk_prep_compl_io_cmd(io, req); 1220 } 1221 1222 static enum auto_buf_reg_res 1223 __ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req, 1224 struct ublk_io *io, struct io_uring_cmd *cmd, 1225 unsigned int issue_flags) 1226 { 1227 int ret; 1228 1229 ret = io_buffer_register_bvec(cmd, req, ublk_io_release, 1230 io->buf.auto_reg.index, issue_flags); 1231 if (ret) { 1232 if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) { 1233 ublk_auto_buf_reg_fallback(ubq, req->tag); 1234 return AUTO_BUF_REG_FALLBACK; 1235 } 1236 ublk_end_request(req, BLK_STS_IOERR); 1237 return AUTO_BUF_REG_FAIL; 1238 } 1239 1240 return AUTO_BUF_REG_OK; 1241 } 1242 1243 static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req, 1244 struct ublk_io *io, struct io_uring_cmd *cmd, 1245 unsigned int issue_flags) 1246 { 1247 enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd, 1248 issue_flags); 1249 1250 if (res != AUTO_BUF_REG_FAIL) { 1251 ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res); 1252 io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags); 1253 } 1254 } 1255 1256 static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req, 1257 struct ublk_io *io) 1258 { 1259 unsigned mapped_bytes = ublk_map_io(ubq, req, io); 1260 1261 /* partially mapped, update io descriptor */ 1262 if (unlikely(mapped_bytes != blk_rq_bytes(req))) { 1263 /* 1264 * Nothing mapped, retry until we succeed. 1265 * 1266 * We may never succeed in mapping any bytes here because 1267 * of OOM. TODO: reserve one buffer with single page pinned 1268 * for providing forward progress guarantee. 1269 */ 1270 if (unlikely(!mapped_bytes)) { 1271 blk_mq_requeue_request(req, false); 1272 blk_mq_delay_kick_requeue_list(req->q, 1273 UBLK_REQUEUE_DELAY_MS); 1274 return false; 1275 } 1276 1277 ublk_get_iod(ubq, req->tag)->nr_sectors = 1278 mapped_bytes >> 9; 1279 } 1280 1281 return true; 1282 } 1283 1284 static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req) 1285 { 1286 unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; 1287 int tag = req->tag; 1288 struct ublk_io *io = &ubq->ios[tag]; 1289 1290 pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n", 1291 __func__, ubq->q_id, req->tag, io->flags, 1292 ublk_get_iod(ubq, req->tag)->addr); 1293 1294 /* 1295 * Task is exiting if either: 1296 * 1297 * (1) current != io->task. 1298 * io_uring_cmd_complete_in_task() tries to run task_work 1299 * in a workqueue if cmd's task is PF_EXITING. 1300 * 1301 * (2) current->flags & PF_EXITING. 1302 */ 1303 if (unlikely(current != io->task || current->flags & PF_EXITING)) { 1304 __ublk_abort_rq(ubq, req); 1305 return; 1306 } 1307 1308 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) { 1309 /* 1310 * We have not handled UBLK_IO_NEED_GET_DATA command yet, 1311 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv 1312 * and notify it. 1313 */ 1314 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA; 1315 pr_devel("%s: need get data. qid %d tag %d io_flags %x\n", 1316 __func__, ubq->q_id, req->tag, io->flags); 1317 ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA, 1318 issue_flags); 1319 return; 1320 } 1321 1322 if (!ublk_start_io(ubq, req, io)) 1323 return; 1324 1325 if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) { 1326 ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags); 1327 } else { 1328 ublk_init_req_ref(ubq, io); 1329 ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags); 1330 } 1331 } 1332 1333 static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw) 1334 { 1335 struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 1336 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1337 struct ublk_queue *ubq = pdu->ubq; 1338 1339 ublk_dispatch_req(ubq, pdu->req); 1340 } 1341 1342 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) 1343 { 1344 struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd; 1345 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1346 1347 pdu->req = rq; 1348 io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb); 1349 } 1350 1351 static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw) 1352 { 1353 struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 1354 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1355 struct request *rq = pdu->req_list; 1356 struct request *next; 1357 1358 do { 1359 next = rq->rq_next; 1360 rq->rq_next = NULL; 1361 ublk_dispatch_req(rq->mq_hctx->driver_data, rq); 1362 rq = next; 1363 } while (rq); 1364 } 1365 1366 static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l) 1367 { 1368 struct io_uring_cmd *cmd = io->cmd; 1369 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1370 1371 pdu->req_list = rq_list_peek(l); 1372 rq_list_init(l); 1373 io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb); 1374 } 1375 1376 static enum blk_eh_timer_return ublk_timeout(struct request *rq) 1377 { 1378 struct ublk_queue *ubq = rq->mq_hctx->driver_data; 1379 pid_t tgid = ubq->dev->ublksrv_tgid; 1380 struct task_struct *p; 1381 struct pid *pid; 1382 1383 if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV)) 1384 return BLK_EH_RESET_TIMER; 1385 1386 if (unlikely(!tgid)) 1387 return BLK_EH_RESET_TIMER; 1388 1389 rcu_read_lock(); 1390 pid = find_vpid(tgid); 1391 p = pid_task(pid, PIDTYPE_PID); 1392 if (p) 1393 send_sig(SIGKILL, p, 0); 1394 rcu_read_unlock(); 1395 return BLK_EH_DONE; 1396 } 1397 1398 static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq, 1399 bool check_cancel) 1400 { 1401 blk_status_t res; 1402 1403 if (unlikely(READ_ONCE(ubq->fail_io))) 1404 return BLK_STS_TARGET; 1405 1406 /* With recovery feature enabled, force_abort is set in 1407 * ublk_stop_dev() before calling del_gendisk(). We have to 1408 * abort all requeued and new rqs here to let del_gendisk() 1409 * move on. Besides, we cannot not call io_uring_cmd_complete_in_task() 1410 * to avoid UAF on io_uring ctx. 1411 * 1412 * Note: force_abort is guaranteed to be seen because it is set 1413 * before request queue is unqiuesced. 1414 */ 1415 if (ublk_nosrv_should_queue_io(ubq) && 1416 unlikely(READ_ONCE(ubq->force_abort))) 1417 return BLK_STS_IOERR; 1418 1419 if (check_cancel && unlikely(ubq->canceling)) 1420 return BLK_STS_IOERR; 1421 1422 /* fill iod to slot in io cmd buffer */ 1423 res = ublk_setup_iod(ubq, rq); 1424 if (unlikely(res != BLK_STS_OK)) 1425 return BLK_STS_IOERR; 1426 1427 blk_mq_start_request(rq); 1428 return BLK_STS_OK; 1429 } 1430 1431 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, 1432 const struct blk_mq_queue_data *bd) 1433 { 1434 struct ublk_queue *ubq = hctx->driver_data; 1435 struct request *rq = bd->rq; 1436 blk_status_t res; 1437 1438 res = ublk_prep_req(ubq, rq, false); 1439 if (res != BLK_STS_OK) 1440 return res; 1441 1442 /* 1443 * ->canceling has to be handled after ->force_abort and ->fail_io 1444 * is dealt with, otherwise this request may not be failed in case 1445 * of recovery, and cause hang when deleting disk 1446 */ 1447 if (unlikely(ubq->canceling)) { 1448 __ublk_abort_rq(ubq, rq); 1449 return BLK_STS_OK; 1450 } 1451 1452 ublk_queue_cmd(ubq, rq); 1453 return BLK_STS_OK; 1454 } 1455 1456 static inline bool ublk_belong_to_same_batch(const struct ublk_io *io, 1457 const struct ublk_io *io2) 1458 { 1459 return (io_uring_cmd_ctx_handle(io->cmd) == 1460 io_uring_cmd_ctx_handle(io2->cmd)) && 1461 (io->task == io2->task); 1462 } 1463 1464 static void ublk_queue_rqs(struct rq_list *rqlist) 1465 { 1466 struct rq_list requeue_list = { }; 1467 struct rq_list submit_list = { }; 1468 struct ublk_io *io = NULL; 1469 struct request *req; 1470 1471 while ((req = rq_list_pop(rqlist))) { 1472 struct ublk_queue *this_q = req->mq_hctx->driver_data; 1473 struct ublk_io *this_io = &this_q->ios[req->tag]; 1474 1475 if (ublk_prep_req(this_q, req, true) != BLK_STS_OK) { 1476 rq_list_add_tail(&requeue_list, req); 1477 continue; 1478 } 1479 1480 if (io && !ublk_belong_to_same_batch(io, this_io) && 1481 !rq_list_empty(&submit_list)) 1482 ublk_queue_cmd_list(io, &submit_list); 1483 io = this_io; 1484 rq_list_add_tail(&submit_list, req); 1485 } 1486 1487 if (!rq_list_empty(&submit_list)) 1488 ublk_queue_cmd_list(io, &submit_list); 1489 *rqlist = requeue_list; 1490 } 1491 1492 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data, 1493 unsigned int hctx_idx) 1494 { 1495 struct ublk_device *ub = driver_data; 1496 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num); 1497 1498 hctx->driver_data = ubq; 1499 return 0; 1500 } 1501 1502 static const struct blk_mq_ops ublk_mq_ops = { 1503 .queue_rq = ublk_queue_rq, 1504 .queue_rqs = ublk_queue_rqs, 1505 .init_hctx = ublk_init_hctx, 1506 .timeout = ublk_timeout, 1507 }; 1508 1509 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) 1510 { 1511 int i; 1512 1513 for (i = 0; i < ubq->q_depth; i++) { 1514 struct ublk_io *io = &ubq->ios[i]; 1515 1516 /* 1517 * UBLK_IO_FLAG_CANCELED is kept for avoiding to touch 1518 * io->cmd 1519 */ 1520 io->flags &= UBLK_IO_FLAG_CANCELED; 1521 io->cmd = NULL; 1522 io->buf.addr = 0; 1523 1524 /* 1525 * old task is PF_EXITING, put it now 1526 * 1527 * It could be NULL in case of closing one quiesced 1528 * device. 1529 */ 1530 if (io->task) { 1531 put_task_struct(io->task); 1532 io->task = NULL; 1533 } 1534 1535 WARN_ON_ONCE(refcount_read(&io->ref)); 1536 WARN_ON_ONCE(io->task_registered_buffers); 1537 } 1538 } 1539 1540 static int ublk_ch_open(struct inode *inode, struct file *filp) 1541 { 1542 struct ublk_device *ub = container_of(inode->i_cdev, 1543 struct ublk_device, cdev); 1544 1545 if (test_and_set_bit(UB_STATE_OPEN, &ub->state)) 1546 return -EBUSY; 1547 filp->private_data = ub; 1548 ub->ublksrv_tgid = current->tgid; 1549 return 0; 1550 } 1551 1552 static void ublk_reset_ch_dev(struct ublk_device *ub) 1553 { 1554 int i; 1555 1556 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1557 ublk_queue_reinit(ub, ublk_get_queue(ub, i)); 1558 1559 /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */ 1560 ub->mm = NULL; 1561 ub->nr_io_ready = 0; 1562 ub->unprivileged_daemons = false; 1563 ub->ublksrv_tgid = -1; 1564 } 1565 1566 static struct gendisk *ublk_get_disk(struct ublk_device *ub) 1567 { 1568 struct gendisk *disk; 1569 1570 spin_lock(&ub->lock); 1571 disk = ub->ub_disk; 1572 if (disk) 1573 get_device(disk_to_dev(disk)); 1574 spin_unlock(&ub->lock); 1575 1576 return disk; 1577 } 1578 1579 static void ublk_put_disk(struct gendisk *disk) 1580 { 1581 if (disk) 1582 put_device(disk_to_dev(disk)); 1583 } 1584 1585 /* 1586 * Use this function to ensure that ->canceling is consistently set for 1587 * the device and all queues. Do not set these flags directly. 1588 * 1589 * Caller must ensure that: 1590 * - cancel_mutex is held. This ensures that there is no concurrent 1591 * access to ub->canceling and no concurrent writes to ubq->canceling. 1592 * - there are no concurrent reads of ubq->canceling from the queue_rq 1593 * path. This can be done by quiescing the queue, or through other 1594 * means. 1595 */ 1596 static void ublk_set_canceling(struct ublk_device *ub, bool canceling) 1597 __must_hold(&ub->cancel_mutex) 1598 { 1599 int i; 1600 1601 ub->canceling = canceling; 1602 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1603 ublk_get_queue(ub, i)->canceling = canceling; 1604 } 1605 1606 static bool ublk_check_and_reset_active_ref(struct ublk_device *ub) 1607 { 1608 int i, j; 1609 1610 if (!ublk_dev_need_req_ref(ub)) 1611 return false; 1612 1613 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { 1614 struct ublk_queue *ubq = ublk_get_queue(ub, i); 1615 1616 for (j = 0; j < ubq->q_depth; j++) { 1617 struct ublk_io *io = &ubq->ios[j]; 1618 unsigned int refs = refcount_read(&io->ref) + 1619 io->task_registered_buffers; 1620 1621 /* 1622 * UBLK_REFCOUNT_INIT or zero means no active 1623 * reference 1624 */ 1625 if (refs != UBLK_REFCOUNT_INIT && refs != 0) 1626 return true; 1627 1628 /* reset to zero if the io hasn't active references */ 1629 refcount_set(&io->ref, 0); 1630 io->task_registered_buffers = 0; 1631 } 1632 } 1633 return false; 1634 } 1635 1636 static void ublk_ch_release_work_fn(struct work_struct *work) 1637 { 1638 struct ublk_device *ub = 1639 container_of(work, struct ublk_device, exit_work.work); 1640 struct gendisk *disk; 1641 int i; 1642 1643 /* 1644 * For zero-copy and auto buffer register modes, I/O references 1645 * might not be dropped naturally when the daemon is killed, but 1646 * io_uring guarantees that registered bvec kernel buffers are 1647 * unregistered finally when freeing io_uring context, then the 1648 * active references are dropped. 1649 * 1650 * Wait until active references are dropped for avoiding use-after-free 1651 * 1652 * registered buffer may be unregistered in io_ring's release hander, 1653 * so have to wait by scheduling work function for avoiding the two 1654 * file release dependency. 1655 */ 1656 if (ublk_check_and_reset_active_ref(ub)) { 1657 schedule_delayed_work(&ub->exit_work, 1); 1658 return; 1659 } 1660 1661 /* 1662 * disk isn't attached yet, either device isn't live, or it has 1663 * been removed already, so we needn't to do anything 1664 */ 1665 disk = ublk_get_disk(ub); 1666 if (!disk) 1667 goto out; 1668 1669 /* 1670 * All uring_cmd are done now, so abort any request outstanding to 1671 * the ublk server 1672 * 1673 * This can be done in lockless way because ublk server has been 1674 * gone 1675 * 1676 * More importantly, we have to provide forward progress guarantee 1677 * without holding ub->mutex, otherwise control task grabbing 1678 * ub->mutex triggers deadlock 1679 * 1680 * All requests may be inflight, so ->canceling may not be set, set 1681 * it now. 1682 */ 1683 mutex_lock(&ub->cancel_mutex); 1684 ublk_set_canceling(ub, true); 1685 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1686 ublk_abort_queue(ub, ublk_get_queue(ub, i)); 1687 mutex_unlock(&ub->cancel_mutex); 1688 blk_mq_kick_requeue_list(disk->queue); 1689 1690 /* 1691 * All infligh requests have been completed or requeued and any new 1692 * request will be failed or requeued via `->canceling` now, so it is 1693 * fine to grab ub->mutex now. 1694 */ 1695 mutex_lock(&ub->mutex); 1696 1697 /* double check after grabbing lock */ 1698 if (!ub->ub_disk) 1699 goto unlock; 1700 1701 /* 1702 * Transition the device to the nosrv state. What exactly this 1703 * means depends on the recovery flags 1704 */ 1705 if (ublk_nosrv_should_stop_dev(ub)) { 1706 /* 1707 * Allow any pending/future I/O to pass through quickly 1708 * with an error. This is needed because del_gendisk 1709 * waits for all pending I/O to complete 1710 */ 1711 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1712 WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true); 1713 1714 ublk_stop_dev_unlocked(ub); 1715 } else { 1716 if (ublk_nosrv_dev_should_queue_io(ub)) { 1717 /* ->canceling is set and all requests are aborted */ 1718 ub->dev_info.state = UBLK_S_DEV_QUIESCED; 1719 } else { 1720 ub->dev_info.state = UBLK_S_DEV_FAIL_IO; 1721 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1722 WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true); 1723 } 1724 } 1725 unlock: 1726 mutex_unlock(&ub->mutex); 1727 ublk_put_disk(disk); 1728 1729 /* all uring_cmd has been done now, reset device & ubq */ 1730 ublk_reset_ch_dev(ub); 1731 out: 1732 clear_bit(UB_STATE_OPEN, &ub->state); 1733 1734 /* put the reference grabbed in ublk_ch_release() */ 1735 ublk_put_device(ub); 1736 } 1737 1738 static int ublk_ch_release(struct inode *inode, struct file *filp) 1739 { 1740 struct ublk_device *ub = filp->private_data; 1741 1742 /* 1743 * Grab ublk device reference, so it won't be gone until we are 1744 * really released from work function. 1745 */ 1746 ublk_get_device(ub); 1747 1748 INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn); 1749 schedule_delayed_work(&ub->exit_work, 0); 1750 return 0; 1751 } 1752 1753 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */ 1754 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) 1755 { 1756 struct ublk_device *ub = filp->private_data; 1757 size_t sz = vma->vm_end - vma->vm_start; 1758 unsigned max_sz = ublk_max_cmd_buf_size(); 1759 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT; 1760 int q_id, ret = 0; 1761 1762 spin_lock(&ub->lock); 1763 if (!ub->mm) 1764 ub->mm = current->mm; 1765 if (current->mm != ub->mm) 1766 ret = -EINVAL; 1767 spin_unlock(&ub->lock); 1768 1769 if (ret) 1770 return ret; 1771 1772 if (vma->vm_flags & VM_WRITE) 1773 return -EPERM; 1774 1775 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz; 1776 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end) 1777 return -EINVAL; 1778 1779 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz; 1780 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n", 1781 __func__, q_id, current->pid, vma->vm_start, 1782 phys_off, (unsigned long)sz); 1783 1784 if (sz != ublk_queue_cmd_buf_size(ub)) 1785 return -EINVAL; 1786 1787 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT; 1788 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); 1789 } 1790 1791 static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io, 1792 struct request *req) 1793 { 1794 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); 1795 1796 if (ublk_nosrv_should_reissue_outstanding(ub)) 1797 blk_mq_requeue_request(req, false); 1798 else { 1799 io->res = -EIO; 1800 __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub)); 1801 } 1802 } 1803 1804 /* 1805 * Called from ublk char device release handler, when any uring_cmd is 1806 * done, meantime request queue is "quiesced" since all inflight requests 1807 * can't be completed because ublk server is dead. 1808 * 1809 * So no one can hold our request IO reference any more, simply ignore the 1810 * reference, and complete the request immediately 1811 */ 1812 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) 1813 { 1814 int i; 1815 1816 for (i = 0; i < ubq->q_depth; i++) { 1817 struct ublk_io *io = &ubq->ios[i]; 1818 1819 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) 1820 __ublk_fail_req(ub, io, io->req); 1821 } 1822 } 1823 1824 static void ublk_start_cancel(struct ublk_device *ub) 1825 { 1826 struct gendisk *disk = ublk_get_disk(ub); 1827 1828 /* Our disk has been dead */ 1829 if (!disk) 1830 return; 1831 1832 mutex_lock(&ub->cancel_mutex); 1833 if (ub->canceling) 1834 goto out; 1835 /* 1836 * Now we are serialized with ublk_queue_rq() 1837 * 1838 * Make sure that ubq->canceling is set when queue is frozen, 1839 * because ublk_queue_rq() has to rely on this flag for avoiding to 1840 * touch completed uring_cmd 1841 */ 1842 blk_mq_quiesce_queue(disk->queue); 1843 ublk_set_canceling(ub, true); 1844 blk_mq_unquiesce_queue(disk->queue); 1845 out: 1846 mutex_unlock(&ub->cancel_mutex); 1847 ublk_put_disk(disk); 1848 } 1849 1850 static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag, 1851 unsigned int issue_flags) 1852 { 1853 struct ublk_io *io = &ubq->ios[tag]; 1854 struct ublk_device *ub = ubq->dev; 1855 struct request *req; 1856 bool done; 1857 1858 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) 1859 return; 1860 1861 /* 1862 * Don't try to cancel this command if the request is started for 1863 * avoiding race between io_uring_cmd_done() and 1864 * io_uring_cmd_complete_in_task(). 1865 * 1866 * Either the started request will be aborted via __ublk_abort_rq(), 1867 * then this uring_cmd is canceled next time, or it will be done in 1868 * task work function ublk_dispatch_req() because io_uring guarantees 1869 * that ublk_dispatch_req() is always called 1870 */ 1871 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); 1872 if (req && blk_mq_request_started(req) && req->tag == tag) 1873 return; 1874 1875 spin_lock(&ubq->cancel_lock); 1876 done = !!(io->flags & UBLK_IO_FLAG_CANCELED); 1877 if (!done) 1878 io->flags |= UBLK_IO_FLAG_CANCELED; 1879 spin_unlock(&ubq->cancel_lock); 1880 1881 if (!done) 1882 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags); 1883 } 1884 1885 /* 1886 * The ublk char device won't be closed when calling cancel fn, so both 1887 * ublk device and queue are guaranteed to be live 1888 * 1889 * Two-stage cancel: 1890 * 1891 * - make every active uring_cmd done in ->cancel_fn() 1892 * 1893 * - aborting inflight ublk IO requests in ublk char device release handler, 1894 * which depends on 1st stage because device can only be closed iff all 1895 * uring_cmd are done 1896 * 1897 * Do _not_ try to acquire ub->mutex before all inflight requests are 1898 * aborted, otherwise deadlock may be caused. 1899 */ 1900 static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd, 1901 unsigned int issue_flags) 1902 { 1903 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1904 struct ublk_queue *ubq = pdu->ubq; 1905 struct task_struct *task; 1906 struct ublk_io *io; 1907 1908 if (WARN_ON_ONCE(!ubq)) 1909 return; 1910 1911 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth)) 1912 return; 1913 1914 task = io_uring_cmd_get_task(cmd); 1915 io = &ubq->ios[pdu->tag]; 1916 if (WARN_ON_ONCE(task && task != io->task)) 1917 return; 1918 1919 ublk_start_cancel(ubq->dev); 1920 1921 WARN_ON_ONCE(io->cmd != cmd); 1922 ublk_cancel_cmd(ubq, pdu->tag, issue_flags); 1923 } 1924 1925 static inline bool ublk_dev_ready(const struct ublk_device *ub) 1926 { 1927 u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth; 1928 1929 return ub->nr_io_ready == total; 1930 } 1931 1932 static void ublk_cancel_queue(struct ublk_queue *ubq) 1933 { 1934 int i; 1935 1936 for (i = 0; i < ubq->q_depth; i++) 1937 ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED); 1938 } 1939 1940 /* Cancel all pending commands, must be called after del_gendisk() returns */ 1941 static void ublk_cancel_dev(struct ublk_device *ub) 1942 { 1943 int i; 1944 1945 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1946 ublk_cancel_queue(ublk_get_queue(ub, i)); 1947 } 1948 1949 static bool ublk_check_inflight_rq(struct request *rq, void *data) 1950 { 1951 bool *idle = data; 1952 1953 if (blk_mq_request_started(rq)) { 1954 *idle = false; 1955 return false; 1956 } 1957 return true; 1958 } 1959 1960 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub) 1961 { 1962 bool idle; 1963 1964 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue)); 1965 while (true) { 1966 idle = true; 1967 blk_mq_tagset_busy_iter(&ub->tag_set, 1968 ublk_check_inflight_rq, &idle); 1969 if (idle) 1970 break; 1971 msleep(UBLK_REQUEUE_DELAY_MS); 1972 } 1973 } 1974 1975 static void ublk_force_abort_dev(struct ublk_device *ub) 1976 { 1977 int i; 1978 1979 pr_devel("%s: force abort ub: dev_id %d state %s\n", 1980 __func__, ub->dev_info.dev_id, 1981 ub->dev_info.state == UBLK_S_DEV_LIVE ? 1982 "LIVE" : "QUIESCED"); 1983 blk_mq_quiesce_queue(ub->ub_disk->queue); 1984 if (ub->dev_info.state == UBLK_S_DEV_LIVE) 1985 ublk_wait_tagset_rqs_idle(ub); 1986 1987 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 1988 ublk_get_queue(ub, i)->force_abort = true; 1989 blk_mq_unquiesce_queue(ub->ub_disk->queue); 1990 /* We may have requeued some rqs in ublk_quiesce_queue() */ 1991 blk_mq_kick_requeue_list(ub->ub_disk->queue); 1992 } 1993 1994 static struct gendisk *ublk_detach_disk(struct ublk_device *ub) 1995 { 1996 struct gendisk *disk; 1997 1998 /* Sync with ublk_abort_queue() by holding the lock */ 1999 spin_lock(&ub->lock); 2000 disk = ub->ub_disk; 2001 ub->dev_info.state = UBLK_S_DEV_DEAD; 2002 ub->dev_info.ublksrv_pid = -1; 2003 ub->ub_disk = NULL; 2004 spin_unlock(&ub->lock); 2005 2006 return disk; 2007 } 2008 2009 static void ublk_stop_dev_unlocked(struct ublk_device *ub) 2010 __must_hold(&ub->mutex) 2011 { 2012 struct gendisk *disk; 2013 2014 if (ub->dev_info.state == UBLK_S_DEV_DEAD) 2015 return; 2016 2017 if (ublk_nosrv_dev_should_queue_io(ub)) 2018 ublk_force_abort_dev(ub); 2019 del_gendisk(ub->ub_disk); 2020 disk = ublk_detach_disk(ub); 2021 put_disk(disk); 2022 } 2023 2024 static void ublk_stop_dev(struct ublk_device *ub) 2025 { 2026 mutex_lock(&ub->mutex); 2027 ublk_stop_dev_unlocked(ub); 2028 mutex_unlock(&ub->mutex); 2029 ublk_cancel_dev(ub); 2030 } 2031 2032 /* reset ublk io_uring queue & io flags */ 2033 static void ublk_reset_io_flags(struct ublk_device *ub) 2034 { 2035 int i, j; 2036 2037 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { 2038 struct ublk_queue *ubq = ublk_get_queue(ub, i); 2039 2040 /* UBLK_IO_FLAG_CANCELED can be cleared now */ 2041 spin_lock(&ubq->cancel_lock); 2042 for (j = 0; j < ubq->q_depth; j++) 2043 ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED; 2044 spin_unlock(&ubq->cancel_lock); 2045 ubq->fail_io = false; 2046 } 2047 mutex_lock(&ub->cancel_mutex); 2048 ublk_set_canceling(ub, false); 2049 mutex_unlock(&ub->cancel_mutex); 2050 } 2051 2052 /* device can only be started after all IOs are ready */ 2053 static void ublk_mark_io_ready(struct ublk_device *ub) 2054 __must_hold(&ub->mutex) 2055 { 2056 if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN)) 2057 ub->unprivileged_daemons = true; 2058 2059 ub->nr_io_ready++; 2060 if (ublk_dev_ready(ub)) { 2061 /* now we are ready for handling ublk io request */ 2062 ublk_reset_io_flags(ub); 2063 complete_all(&ub->completion); 2064 } 2065 } 2066 2067 static inline int ublk_check_cmd_op(u32 cmd_op) 2068 { 2069 u32 ioc_type = _IOC_TYPE(cmd_op); 2070 2071 if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u') 2072 return -EOPNOTSUPP; 2073 2074 if (ioc_type != 'u' && ioc_type != 0) 2075 return -EOPNOTSUPP; 2076 2077 return 0; 2078 } 2079 2080 static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd) 2081 { 2082 struct ublk_auto_buf_reg buf; 2083 2084 buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr)); 2085 2086 if (buf.reserved0 || buf.reserved1) 2087 return -EINVAL; 2088 2089 if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK) 2090 return -EINVAL; 2091 io->buf.auto_reg = buf; 2092 return 0; 2093 } 2094 2095 static int ublk_handle_auto_buf_reg(struct ublk_io *io, 2096 struct io_uring_cmd *cmd, 2097 u16 *buf_idx) 2098 { 2099 if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) { 2100 io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG; 2101 2102 /* 2103 * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ` 2104 * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same 2105 * `io_ring_ctx`. 2106 * 2107 * If this uring_cmd's io_ring_ctx isn't same with the 2108 * one for registering the buffer, it is ublk server's 2109 * responsibility for unregistering the buffer, otherwise 2110 * this ublk request gets stuck. 2111 */ 2112 if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd)) 2113 *buf_idx = io->buf.auto_reg.index; 2114 } 2115 2116 return ublk_set_auto_buf_reg(io, cmd); 2117 } 2118 2119 /* Once we return, `io->req` can't be used any more */ 2120 static inline struct request * 2121 ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd) 2122 { 2123 struct request *req = io->req; 2124 2125 io->cmd = cmd; 2126 io->flags |= UBLK_IO_FLAG_ACTIVE; 2127 /* now this cmd slot is owned by ublk driver */ 2128 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV; 2129 2130 return req; 2131 } 2132 2133 static inline int 2134 ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io, 2135 struct io_uring_cmd *cmd, unsigned long buf_addr, 2136 u16 *buf_idx) 2137 { 2138 if (ublk_dev_support_auto_buf_reg(ub)) 2139 return ublk_handle_auto_buf_reg(io, cmd, buf_idx); 2140 2141 io->buf.addr = buf_addr; 2142 return 0; 2143 } 2144 2145 static inline void ublk_prep_cancel(struct io_uring_cmd *cmd, 2146 unsigned int issue_flags, 2147 struct ublk_queue *ubq, unsigned int tag) 2148 { 2149 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 2150 2151 /* 2152 * Safe to refer to @ubq since ublk_queue won't be died until its 2153 * commands are completed 2154 */ 2155 pdu->ubq = ubq; 2156 pdu->tag = tag; 2157 io_uring_cmd_mark_cancelable(cmd, issue_flags); 2158 } 2159 2160 static void ublk_io_release(void *priv) 2161 { 2162 struct request *rq = priv; 2163 struct ublk_queue *ubq = rq->mq_hctx->driver_data; 2164 struct ublk_io *io = &ubq->ios[rq->tag]; 2165 2166 /* 2167 * task_registered_buffers may be 0 if buffers were registered off task 2168 * but unregistered on task. Or after UBLK_IO_COMMIT_AND_FETCH_REQ. 2169 */ 2170 if (current == io->task && io->task_registered_buffers) 2171 io->task_registered_buffers--; 2172 else 2173 ublk_put_req_ref(io, rq); 2174 } 2175 2176 static int ublk_register_io_buf(struct io_uring_cmd *cmd, 2177 struct ublk_device *ub, 2178 u16 q_id, u16 tag, 2179 struct ublk_io *io, 2180 unsigned int index, unsigned int issue_flags) 2181 { 2182 struct request *req; 2183 int ret; 2184 2185 if (!ublk_dev_support_zero_copy(ub)) 2186 return -EINVAL; 2187 2188 req = __ublk_check_and_get_req(ub, q_id, tag, io, 0); 2189 if (!req) 2190 return -EINVAL; 2191 2192 ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index, 2193 issue_flags); 2194 if (ret) { 2195 ublk_put_req_ref(io, req); 2196 return ret; 2197 } 2198 2199 return 0; 2200 } 2201 2202 static int 2203 ublk_daemon_register_io_buf(struct io_uring_cmd *cmd, 2204 struct ublk_device *ub, 2205 u16 q_id, u16 tag, struct ublk_io *io, 2206 unsigned index, unsigned issue_flags) 2207 { 2208 unsigned new_registered_buffers; 2209 struct request *req = io->req; 2210 int ret; 2211 2212 /* 2213 * Ensure there are still references for ublk_sub_req_ref() to release. 2214 * If not, fall back on the thread-safe buffer registration. 2215 */ 2216 new_registered_buffers = io->task_registered_buffers + 1; 2217 if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT)) 2218 return ublk_register_io_buf(cmd, ub, q_id, tag, io, index, 2219 issue_flags); 2220 2221 if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req)) 2222 return -EINVAL; 2223 2224 ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index, 2225 issue_flags); 2226 if (ret) 2227 return ret; 2228 2229 io->task_registered_buffers = new_registered_buffers; 2230 return 0; 2231 } 2232 2233 static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, 2234 const struct ublk_device *ub, 2235 unsigned int index, unsigned int issue_flags) 2236 { 2237 if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)) 2238 return -EINVAL; 2239 2240 return io_buffer_unregister_bvec(cmd, index, issue_flags); 2241 } 2242 2243 static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr) 2244 { 2245 if (ublk_dev_need_map_io(ub)) { 2246 /* 2247 * FETCH_RQ has to provide IO buffer if NEED GET 2248 * DATA is not enabled 2249 */ 2250 if (!buf_addr && !ublk_dev_need_get_data(ub)) 2251 return -EINVAL; 2252 } else if (buf_addr) { 2253 /* User copy requires addr to be unset */ 2254 return -EINVAL; 2255 } 2256 return 0; 2257 } 2258 2259 static int __ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub, 2260 struct ublk_io *io) 2261 { 2262 /* UBLK_IO_FETCH_REQ is only allowed before dev is setup */ 2263 if (ublk_dev_ready(ub)) 2264 return -EBUSY; 2265 2266 /* allow each command to be FETCHed at most once */ 2267 if (io->flags & UBLK_IO_FLAG_ACTIVE) 2268 return -EINVAL; 2269 2270 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV); 2271 2272 ublk_fill_io_cmd(io, cmd); 2273 2274 WRITE_ONCE(io->task, get_task_struct(current)); 2275 ublk_mark_io_ready(ub); 2276 2277 return 0; 2278 } 2279 2280 static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub, 2281 struct ublk_io *io, __u64 buf_addr) 2282 { 2283 int ret; 2284 2285 /* 2286 * When handling FETCH command for setting up ublk uring queue, 2287 * ub->mutex is the innermost lock, and we won't block for handling 2288 * FETCH, so it is fine even for IO_URING_F_NONBLOCK. 2289 */ 2290 mutex_lock(&ub->mutex); 2291 ret = __ublk_fetch(cmd, ub, io); 2292 if (!ret) 2293 ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL); 2294 mutex_unlock(&ub->mutex); 2295 return ret; 2296 } 2297 2298 static int ublk_check_commit_and_fetch(const struct ublk_device *ub, 2299 struct ublk_io *io, __u64 buf_addr) 2300 { 2301 struct request *req = io->req; 2302 2303 if (ublk_dev_need_map_io(ub)) { 2304 /* 2305 * COMMIT_AND_FETCH_REQ has to provide IO buffer if 2306 * NEED GET DATA is not enabled or it is Read IO. 2307 */ 2308 if (!buf_addr && (!ublk_dev_need_get_data(ub) || 2309 req_op(req) == REQ_OP_READ)) 2310 return -EINVAL; 2311 } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) { 2312 /* 2313 * User copy requires addr to be unset when command is 2314 * not zone append 2315 */ 2316 return -EINVAL; 2317 } 2318 2319 return 0; 2320 } 2321 2322 static bool ublk_need_complete_req(const struct ublk_device *ub, 2323 struct ublk_io *io) 2324 { 2325 if (ublk_dev_need_req_ref(ub)) 2326 return ublk_sub_req_ref(io); 2327 return true; 2328 } 2329 2330 static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io, 2331 struct request *req) 2332 { 2333 /* 2334 * We have handled UBLK_IO_NEED_GET_DATA command, 2335 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just 2336 * do the copy work. 2337 */ 2338 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA; 2339 /* update iod->addr because ublksrv may have passed a new io buffer */ 2340 ublk_get_iod(ubq, req->tag)->addr = io->buf.addr; 2341 pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n", 2342 __func__, ubq->q_id, req->tag, io->flags, 2343 ublk_get_iod(ubq, req->tag)->addr); 2344 2345 return ublk_start_io(ubq, req, io); 2346 } 2347 2348 static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd, 2349 unsigned int issue_flags) 2350 { 2351 /* May point to userspace-mapped memory */ 2352 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe); 2353 u16 buf_idx = UBLK_INVALID_BUF_IDX; 2354 struct ublk_device *ub = cmd->file->private_data; 2355 struct ublk_queue *ubq; 2356 struct ublk_io *io = NULL; 2357 u32 cmd_op = cmd->cmd_op; 2358 u16 q_id = READ_ONCE(ub_src->q_id); 2359 u16 tag = READ_ONCE(ub_src->tag); 2360 s32 result = READ_ONCE(ub_src->result); 2361 u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */ 2362 struct request *req; 2363 int ret; 2364 bool compl; 2365 2366 WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED); 2367 2368 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n", 2369 __func__, cmd->cmd_op, q_id, tag, result); 2370 2371 ret = ublk_check_cmd_op(cmd_op); 2372 if (ret) 2373 goto out; 2374 2375 /* 2376 * io_buffer_unregister_bvec() doesn't access the ubq or io, 2377 * so no need to validate the q_id, tag, or task 2378 */ 2379 if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF) 2380 return ublk_unregister_io_buf(cmd, ub, addr, issue_flags); 2381 2382 ret = -EINVAL; 2383 if (q_id >= ub->dev_info.nr_hw_queues) 2384 goto out; 2385 2386 ubq = ublk_get_queue(ub, q_id); 2387 2388 if (tag >= ub->dev_info.queue_depth) 2389 goto out; 2390 2391 io = &ubq->ios[tag]; 2392 /* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */ 2393 if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) { 2394 ret = ublk_check_fetch_buf(ub, addr); 2395 if (ret) 2396 goto out; 2397 ret = ublk_fetch(cmd, ub, io, addr); 2398 if (ret) 2399 goto out; 2400 2401 ublk_prep_cancel(cmd, issue_flags, ubq, tag); 2402 return -EIOCBQUEUED; 2403 } 2404 2405 if (READ_ONCE(io->task) != current) { 2406 /* 2407 * ublk_register_io_buf() accesses only the io's refcount, 2408 * so can be handled on any task 2409 */ 2410 if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF) 2411 return ublk_register_io_buf(cmd, ub, q_id, tag, io, 2412 addr, issue_flags); 2413 2414 goto out; 2415 } 2416 2417 /* there is pending io cmd, something must be wrong */ 2418 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) { 2419 ret = -EBUSY; 2420 goto out; 2421 } 2422 2423 /* 2424 * ensure that the user issues UBLK_IO_NEED_GET_DATA 2425 * iff the driver have set the UBLK_IO_FLAG_NEED_GET_DATA. 2426 */ 2427 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) 2428 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA)) 2429 goto out; 2430 2431 switch (_IOC_NR(cmd_op)) { 2432 case UBLK_IO_REGISTER_IO_BUF: 2433 return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr, 2434 issue_flags); 2435 case UBLK_IO_COMMIT_AND_FETCH_REQ: 2436 ret = ublk_check_commit_and_fetch(ub, io, addr); 2437 if (ret) 2438 goto out; 2439 io->res = result; 2440 req = ublk_fill_io_cmd(io, cmd); 2441 ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx); 2442 compl = ublk_need_complete_req(ub, io); 2443 2444 /* can't touch 'ublk_io' any more */ 2445 if (buf_idx != UBLK_INVALID_BUF_IDX) 2446 io_buffer_unregister_bvec(cmd, buf_idx, issue_flags); 2447 if (req_op(req) == REQ_OP_ZONE_APPEND) 2448 req->__sector = addr; 2449 if (compl) 2450 __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub)); 2451 2452 if (ret) 2453 goto out; 2454 break; 2455 case UBLK_IO_NEED_GET_DATA: 2456 /* 2457 * ublk_get_data() may fail and fallback to requeue, so keep 2458 * uring_cmd active first and prepare for handling new requeued 2459 * request 2460 */ 2461 req = ublk_fill_io_cmd(io, cmd); 2462 ret = ublk_config_io_buf(ub, io, cmd, addr, NULL); 2463 WARN_ON_ONCE(ret); 2464 if (likely(ublk_get_data(ubq, io, req))) { 2465 __ublk_prep_compl_io_cmd(io, req); 2466 return UBLK_IO_RES_OK; 2467 } 2468 break; 2469 default: 2470 goto out; 2471 } 2472 ublk_prep_cancel(cmd, issue_flags, ubq, tag); 2473 return -EIOCBQUEUED; 2474 2475 out: 2476 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n", 2477 __func__, cmd_op, tag, ret, io ? io->flags : 0); 2478 return ret; 2479 } 2480 2481 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, 2482 u16 q_id, u16 tag, struct ublk_io *io, size_t offset) 2483 { 2484 struct request *req; 2485 2486 /* 2487 * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ, 2488 * which would overwrite it with io->cmd 2489 */ 2490 req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); 2491 if (!req) 2492 return NULL; 2493 2494 if (!ublk_get_req_ref(io)) 2495 return NULL; 2496 2497 if (unlikely(!blk_mq_request_started(req) || req->tag != tag)) 2498 goto fail_put; 2499 2500 if (!ublk_rq_has_data(req)) 2501 goto fail_put; 2502 2503 if (offset > blk_rq_bytes(req)) 2504 goto fail_put; 2505 2506 return req; 2507 fail_put: 2508 ublk_put_req_ref(io, req); 2509 return NULL; 2510 } 2511 2512 static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw) 2513 { 2514 unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; 2515 struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req); 2516 int ret = ublk_ch_uring_cmd_local(cmd, issue_flags); 2517 2518 if (ret != -EIOCBQUEUED) 2519 io_uring_cmd_done(cmd, ret, issue_flags); 2520 } 2521 2522 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) 2523 { 2524 if (unlikely(issue_flags & IO_URING_F_CANCEL)) { 2525 ublk_uring_cmd_cancel_fn(cmd, issue_flags); 2526 return 0; 2527 } 2528 2529 /* well-implemented server won't run into unlocked */ 2530 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { 2531 io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb); 2532 return -EIOCBQUEUED; 2533 } 2534 2535 return ublk_ch_uring_cmd_local(cmd, issue_flags); 2536 } 2537 2538 static inline bool ublk_check_ubuf_dir(const struct request *req, 2539 int ubuf_dir) 2540 { 2541 /* copy ubuf to request pages */ 2542 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) && 2543 ubuf_dir == ITER_SOURCE) 2544 return true; 2545 2546 /* copy request pages to ubuf */ 2547 if ((req_op(req) == REQ_OP_WRITE || 2548 req_op(req) == REQ_OP_ZONE_APPEND) && 2549 ubuf_dir == ITER_DEST) 2550 return true; 2551 2552 return false; 2553 } 2554 2555 static struct request *ublk_check_and_get_req(struct kiocb *iocb, 2556 struct iov_iter *iter, size_t *off, int dir, 2557 struct ublk_io **io) 2558 { 2559 struct ublk_device *ub = iocb->ki_filp->private_data; 2560 struct ublk_queue *ubq; 2561 struct request *req; 2562 size_t buf_off; 2563 u16 tag, q_id; 2564 2565 if (!user_backed_iter(iter)) 2566 return ERR_PTR(-EACCES); 2567 2568 if (ub->dev_info.state == UBLK_S_DEV_DEAD) 2569 return ERR_PTR(-EACCES); 2570 2571 tag = ublk_pos_to_tag(iocb->ki_pos); 2572 q_id = ublk_pos_to_hwq(iocb->ki_pos); 2573 buf_off = ublk_pos_to_buf_off(iocb->ki_pos); 2574 2575 if (q_id >= ub->dev_info.nr_hw_queues) 2576 return ERR_PTR(-EINVAL); 2577 2578 ubq = ublk_get_queue(ub, q_id); 2579 if (!ublk_dev_support_user_copy(ub)) 2580 return ERR_PTR(-EACCES); 2581 2582 if (tag >= ub->dev_info.queue_depth) 2583 return ERR_PTR(-EINVAL); 2584 2585 *io = &ubq->ios[tag]; 2586 req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off); 2587 if (!req) 2588 return ERR_PTR(-EINVAL); 2589 2590 if (!ublk_check_ubuf_dir(req, dir)) 2591 goto fail; 2592 2593 *off = buf_off; 2594 return req; 2595 fail: 2596 ublk_put_req_ref(*io, req); 2597 return ERR_PTR(-EACCES); 2598 } 2599 2600 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to) 2601 { 2602 struct request *req; 2603 struct ublk_io *io; 2604 size_t buf_off; 2605 size_t ret; 2606 2607 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io); 2608 if (IS_ERR(req)) 2609 return PTR_ERR(req); 2610 2611 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST); 2612 ublk_put_req_ref(io, req); 2613 2614 return ret; 2615 } 2616 2617 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from) 2618 { 2619 struct request *req; 2620 struct ublk_io *io; 2621 size_t buf_off; 2622 size_t ret; 2623 2624 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io); 2625 if (IS_ERR(req)) 2626 return PTR_ERR(req); 2627 2628 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE); 2629 ublk_put_req_ref(io, req); 2630 2631 return ret; 2632 } 2633 2634 static const struct file_operations ublk_ch_fops = { 2635 .owner = THIS_MODULE, 2636 .open = ublk_ch_open, 2637 .release = ublk_ch_release, 2638 .read_iter = ublk_ch_read_iter, 2639 .write_iter = ublk_ch_write_iter, 2640 .uring_cmd = ublk_ch_uring_cmd, 2641 .mmap = ublk_ch_mmap, 2642 }; 2643 2644 static void ublk_deinit_queue(struct ublk_device *ub, int q_id) 2645 { 2646 struct ublk_queue *ubq = ub->queues[q_id]; 2647 int size, i; 2648 2649 if (!ubq) 2650 return; 2651 2652 size = ublk_queue_cmd_buf_size(ub); 2653 2654 for (i = 0; i < ubq->q_depth; i++) { 2655 struct ublk_io *io = &ubq->ios[i]; 2656 if (io->task) 2657 put_task_struct(io->task); 2658 WARN_ON_ONCE(refcount_read(&io->ref)); 2659 WARN_ON_ONCE(io->task_registered_buffers); 2660 } 2661 2662 if (ubq->io_cmd_buf) 2663 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size)); 2664 2665 kvfree(ubq); 2666 ub->queues[q_id] = NULL; 2667 } 2668 2669 static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id) 2670 { 2671 unsigned int cpu; 2672 2673 /* Find first CPU mapped to this queue */ 2674 for_each_possible_cpu(cpu) { 2675 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id) 2676 return cpu_to_node(cpu); 2677 } 2678 2679 return NUMA_NO_NODE; 2680 } 2681 2682 static int ublk_init_queue(struct ublk_device *ub, int q_id) 2683 { 2684 int depth = ub->dev_info.queue_depth; 2685 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; 2686 struct ublk_queue *ubq; 2687 struct page *page; 2688 int numa_node; 2689 int size; 2690 2691 /* Determine NUMA node based on queue's CPU affinity */ 2692 numa_node = ublk_get_queue_numa_node(ub, q_id); 2693 2694 /* Allocate queue structure on local NUMA node */ 2695 ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL, 2696 numa_node); 2697 if (!ubq) 2698 return -ENOMEM; 2699 2700 spin_lock_init(&ubq->cancel_lock); 2701 ubq->flags = ub->dev_info.flags; 2702 ubq->q_id = q_id; 2703 ubq->q_depth = depth; 2704 size = ublk_queue_cmd_buf_size(ub); 2705 2706 /* Allocate I/O command buffer on local NUMA node */ 2707 page = alloc_pages_node(numa_node, gfp_flags, get_order(size)); 2708 if (!page) { 2709 kvfree(ubq); 2710 return -ENOMEM; 2711 } 2712 ubq->io_cmd_buf = page_address(page); 2713 2714 ub->queues[q_id] = ubq; 2715 ubq->dev = ub; 2716 return 0; 2717 } 2718 2719 static void ublk_deinit_queues(struct ublk_device *ub) 2720 { 2721 int i; 2722 2723 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) 2724 ublk_deinit_queue(ub, i); 2725 } 2726 2727 static int ublk_init_queues(struct ublk_device *ub) 2728 { 2729 int i, ret; 2730 2731 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { 2732 ret = ublk_init_queue(ub, i); 2733 if (ret) 2734 goto fail; 2735 } 2736 2737 init_completion(&ub->completion); 2738 return 0; 2739 2740 fail: 2741 ublk_deinit_queues(ub); 2742 return ret; 2743 } 2744 2745 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx) 2746 { 2747 int i = idx; 2748 int err; 2749 2750 spin_lock(&ublk_idr_lock); 2751 /* allocate id, if @id >= 0, we're requesting that specific id */ 2752 if (i >= 0) { 2753 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT); 2754 if (err == -ENOSPC) 2755 err = -EEXIST; 2756 } else { 2757 err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS, 2758 GFP_NOWAIT); 2759 } 2760 spin_unlock(&ublk_idr_lock); 2761 2762 if (err >= 0) 2763 ub->ub_number = err; 2764 2765 return err; 2766 } 2767 2768 static void ublk_free_dev_number(struct ublk_device *ub) 2769 { 2770 spin_lock(&ublk_idr_lock); 2771 idr_remove(&ublk_index_idr, ub->ub_number); 2772 wake_up_all(&ublk_idr_wq); 2773 spin_unlock(&ublk_idr_lock); 2774 } 2775 2776 static void ublk_cdev_rel(struct device *dev) 2777 { 2778 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev); 2779 2780 blk_mq_free_tag_set(&ub->tag_set); 2781 ublk_deinit_queues(ub); 2782 ublk_free_dev_number(ub); 2783 mutex_destroy(&ub->mutex); 2784 mutex_destroy(&ub->cancel_mutex); 2785 kfree(ub); 2786 } 2787 2788 static int ublk_add_chdev(struct ublk_device *ub) 2789 { 2790 struct device *dev = &ub->cdev_dev; 2791 int minor = ub->ub_number; 2792 int ret; 2793 2794 dev->parent = ublk_misc.this_device; 2795 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor); 2796 dev->class = &ublk_chr_class; 2797 dev->release = ublk_cdev_rel; 2798 device_initialize(dev); 2799 2800 ret = dev_set_name(dev, "ublkc%d", minor); 2801 if (ret) 2802 goto fail; 2803 2804 cdev_init(&ub->cdev, &ublk_ch_fops); 2805 ret = cdev_device_add(&ub->cdev, dev); 2806 if (ret) 2807 goto fail; 2808 2809 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) 2810 unprivileged_ublks_added++; 2811 return 0; 2812 fail: 2813 put_device(dev); 2814 return ret; 2815 } 2816 2817 /* align max io buffer size with PAGE_SIZE */ 2818 static void ublk_align_max_io_size(struct ublk_device *ub) 2819 { 2820 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes; 2821 2822 ub->dev_info.max_io_buf_bytes = 2823 round_down(max_io_bytes, PAGE_SIZE); 2824 } 2825 2826 static int ublk_add_tag_set(struct ublk_device *ub) 2827 { 2828 ub->tag_set.ops = &ublk_mq_ops; 2829 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues; 2830 ub->tag_set.queue_depth = ub->dev_info.queue_depth; 2831 ub->tag_set.numa_node = NUMA_NO_NODE; 2832 ub->tag_set.driver_data = ub; 2833 return blk_mq_alloc_tag_set(&ub->tag_set); 2834 } 2835 2836 static void ublk_remove(struct ublk_device *ub) 2837 { 2838 bool unprivileged; 2839 2840 ublk_stop_dev(ub); 2841 cdev_device_del(&ub->cdev, &ub->cdev_dev); 2842 unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV; 2843 ublk_put_device(ub); 2844 2845 if (unprivileged) 2846 unprivileged_ublks_added--; 2847 } 2848 2849 static struct ublk_device *ublk_get_device_from_id(int idx) 2850 { 2851 struct ublk_device *ub = NULL; 2852 2853 if (idx < 0) 2854 return NULL; 2855 2856 spin_lock(&ublk_idr_lock); 2857 ub = idr_find(&ublk_index_idr, idx); 2858 if (ub) 2859 ub = ublk_get_device(ub); 2860 spin_unlock(&ublk_idr_lock); 2861 2862 return ub; 2863 } 2864 2865 static int ublk_ctrl_start_dev(struct ublk_device *ub, 2866 const struct ublksrv_ctrl_cmd *header) 2867 { 2868 const struct ublk_param_basic *p = &ub->params.basic; 2869 int ublksrv_pid = (int)header->data[0]; 2870 struct queue_limits lim = { 2871 .logical_block_size = 1 << p->logical_bs_shift, 2872 .physical_block_size = 1 << p->physical_bs_shift, 2873 .io_min = 1 << p->io_min_shift, 2874 .io_opt = 1 << p->io_opt_shift, 2875 .max_hw_sectors = p->max_sectors, 2876 .chunk_sectors = p->chunk_sectors, 2877 .virt_boundary_mask = p->virt_boundary_mask, 2878 .max_segments = USHRT_MAX, 2879 .max_segment_size = UINT_MAX, 2880 .dma_alignment = 3, 2881 }; 2882 struct gendisk *disk; 2883 int ret = -EINVAL; 2884 2885 if (ublksrv_pid <= 0) 2886 return -EINVAL; 2887 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC)) 2888 return -EINVAL; 2889 2890 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) { 2891 const struct ublk_param_discard *pd = &ub->params.discard; 2892 2893 lim.discard_alignment = pd->discard_alignment; 2894 lim.discard_granularity = pd->discard_granularity; 2895 lim.max_hw_discard_sectors = pd->max_discard_sectors; 2896 lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors; 2897 lim.max_discard_segments = pd->max_discard_segments; 2898 } 2899 2900 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) { 2901 const struct ublk_param_zoned *p = &ub->params.zoned; 2902 2903 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) 2904 return -EOPNOTSUPP; 2905 2906 lim.features |= BLK_FEAT_ZONED; 2907 lim.max_active_zones = p->max_active_zones; 2908 lim.max_open_zones = p->max_open_zones; 2909 lim.max_hw_zone_append_sectors = p->max_zone_append_sectors; 2910 } 2911 2912 if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) { 2913 lim.features |= BLK_FEAT_WRITE_CACHE; 2914 if (ub->params.basic.attrs & UBLK_ATTR_FUA) 2915 lim.features |= BLK_FEAT_FUA; 2916 } 2917 2918 if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL) 2919 lim.features |= BLK_FEAT_ROTATIONAL; 2920 2921 if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) 2922 lim.dma_alignment = ub->params.dma.alignment; 2923 2924 if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) { 2925 lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask; 2926 lim.max_segment_size = ub->params.seg.max_segment_size; 2927 lim.max_segments = ub->params.seg.max_segments; 2928 } 2929 2930 if (wait_for_completion_interruptible(&ub->completion) != 0) 2931 return -EINTR; 2932 2933 if (ub->ublksrv_tgid != ublksrv_pid) 2934 return -EINVAL; 2935 2936 mutex_lock(&ub->mutex); 2937 if (ub->dev_info.state == UBLK_S_DEV_LIVE || 2938 test_bit(UB_STATE_USED, &ub->state)) { 2939 ret = -EEXIST; 2940 goto out_unlock; 2941 } 2942 2943 disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL); 2944 if (IS_ERR(disk)) { 2945 ret = PTR_ERR(disk); 2946 goto out_unlock; 2947 } 2948 sprintf(disk->disk_name, "ublkb%d", ub->ub_number); 2949 disk->fops = &ub_fops; 2950 disk->private_data = ub; 2951 2952 ub->dev_info.ublksrv_pid = ublksrv_pid; 2953 ub->ub_disk = disk; 2954 2955 ublk_apply_params(ub); 2956 2957 /* don't probe partitions if any daemon task is un-trusted */ 2958 if (ub->unprivileged_daemons) 2959 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 2960 2961 ublk_get_device(ub); 2962 ub->dev_info.state = UBLK_S_DEV_LIVE; 2963 2964 if (ublk_dev_is_zoned(ub)) { 2965 ret = ublk_revalidate_disk_zones(ub); 2966 if (ret) 2967 goto out_put_cdev; 2968 } 2969 2970 ret = add_disk(disk); 2971 if (ret) 2972 goto out_put_cdev; 2973 2974 set_bit(UB_STATE_USED, &ub->state); 2975 2976 out_put_cdev: 2977 if (ret) { 2978 ublk_detach_disk(ub); 2979 ublk_put_device(ub); 2980 } 2981 if (ret) 2982 put_disk(disk); 2983 out_unlock: 2984 mutex_unlock(&ub->mutex); 2985 return ret; 2986 } 2987 2988 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub, 2989 const struct ublksrv_ctrl_cmd *header) 2990 { 2991 void __user *argp = (void __user *)(unsigned long)header->addr; 2992 cpumask_var_t cpumask; 2993 unsigned long queue; 2994 unsigned int retlen; 2995 unsigned int i; 2996 int ret; 2997 2998 if (header->len * BITS_PER_BYTE < nr_cpu_ids) 2999 return -EINVAL; 3000 if (header->len & (sizeof(unsigned long)-1)) 3001 return -EINVAL; 3002 if (!header->addr) 3003 return -EINVAL; 3004 3005 queue = header->data[0]; 3006 if (queue >= ub->dev_info.nr_hw_queues) 3007 return -EINVAL; 3008 3009 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 3010 return -ENOMEM; 3011 3012 for_each_possible_cpu(i) { 3013 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue) 3014 cpumask_set_cpu(i, cpumask); 3015 } 3016 3017 ret = -EFAULT; 3018 retlen = min_t(unsigned short, header->len, cpumask_size()); 3019 if (copy_to_user(argp, cpumask, retlen)) 3020 goto out_free_cpumask; 3021 if (retlen != header->len && 3022 clear_user(argp + retlen, header->len - retlen)) 3023 goto out_free_cpumask; 3024 3025 ret = 0; 3026 out_free_cpumask: 3027 free_cpumask_var(cpumask); 3028 return ret; 3029 } 3030 3031 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info) 3032 { 3033 pr_devel("%s: dev id %d flags %llx\n", __func__, 3034 info->dev_id, info->flags); 3035 pr_devel("\t nr_hw_queues %d queue_depth %d\n", 3036 info->nr_hw_queues, info->queue_depth); 3037 } 3038 3039 static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) 3040 { 3041 void __user *argp = (void __user *)(unsigned long)header->addr; 3042 struct ublksrv_ctrl_dev_info info; 3043 struct ublk_device *ub; 3044 int ret = -EINVAL; 3045 3046 if (header->len < sizeof(info) || !header->addr) 3047 return -EINVAL; 3048 if (header->queue_id != (u16)-1) { 3049 pr_warn("%s: queue_id is wrong %x\n", 3050 __func__, header->queue_id); 3051 return -EINVAL; 3052 } 3053 3054 if (copy_from_user(&info, argp, sizeof(info))) 3055 return -EFAULT; 3056 3057 if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth || 3058 info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues) 3059 return -EINVAL; 3060 3061 if (capable(CAP_SYS_ADMIN)) 3062 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV; 3063 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV)) 3064 return -EPERM; 3065 3066 /* forbid nonsense combinations of recovery flags */ 3067 switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) { 3068 case 0: 3069 case UBLK_F_USER_RECOVERY: 3070 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE): 3071 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO): 3072 break; 3073 default: 3074 pr_warn("%s: invalid recovery flags %llx\n", __func__, 3075 info.flags & UBLK_F_ALL_RECOVERY_FLAGS); 3076 return -EINVAL; 3077 } 3078 3079 if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) { 3080 pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n"); 3081 return -EINVAL; 3082 } 3083 3084 /* 3085 * unprivileged device can't be trusted, but RECOVERY and 3086 * RECOVERY_REISSUE still may hang error handling, so can't 3087 * support recovery features for unprivileged ublk now 3088 * 3089 * TODO: provide forward progress for RECOVERY handler, so that 3090 * unprivileged device can benefit from it 3091 */ 3092 if (info.flags & UBLK_F_UNPRIVILEGED_DEV) { 3093 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE | 3094 UBLK_F_USER_RECOVERY); 3095 3096 /* 3097 * For USER_COPY, we depends on userspace to fill request 3098 * buffer by pwrite() to ublk char device, which can't be 3099 * used for unprivileged device 3100 * 3101 * Same with zero copy or auto buffer register. 3102 */ 3103 if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | 3104 UBLK_F_AUTO_BUF_REG)) 3105 return -EINVAL; 3106 } 3107 3108 /* the created device is always owned by current user */ 3109 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid); 3110 3111 if (header->dev_id != info.dev_id) { 3112 pr_warn("%s: dev id not match %u %u\n", 3113 __func__, header->dev_id, info.dev_id); 3114 return -EINVAL; 3115 } 3116 3117 if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) { 3118 pr_warn("%s: dev id is too large. Max supported is %d\n", 3119 __func__, UBLK_MAX_UBLKS - 1); 3120 return -EINVAL; 3121 } 3122 3123 ublk_dump_dev_info(&info); 3124 3125 ret = mutex_lock_killable(&ublk_ctl_mutex); 3126 if (ret) 3127 return ret; 3128 3129 ret = -EACCES; 3130 if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) && 3131 unprivileged_ublks_added >= unprivileged_ublks_max) 3132 goto out_unlock; 3133 3134 ret = -ENOMEM; 3135 ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL); 3136 if (!ub) 3137 goto out_unlock; 3138 mutex_init(&ub->mutex); 3139 spin_lock_init(&ub->lock); 3140 mutex_init(&ub->cancel_mutex); 3141 3142 ret = ublk_alloc_dev_number(ub, header->dev_id); 3143 if (ret < 0) 3144 goto out_free_ub; 3145 3146 memcpy(&ub->dev_info, &info, sizeof(info)); 3147 3148 /* update device id */ 3149 ub->dev_info.dev_id = ub->ub_number; 3150 3151 /* 3152 * 64bit flags will be copied back to userspace as feature 3153 * negotiation result, so have to clear flags which driver 3154 * doesn't support yet, then userspace can get correct flags 3155 * (features) to handle. 3156 */ 3157 ub->dev_info.flags &= UBLK_F_ALL; 3158 3159 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | 3160 UBLK_F_URING_CMD_COMP_IN_TASK | 3161 UBLK_F_PER_IO_DAEMON | 3162 UBLK_F_BUF_REG_OFF_DAEMON; 3163 3164 /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ 3165 if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY | 3166 UBLK_F_AUTO_BUF_REG)) 3167 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; 3168 3169 /* 3170 * Zoned storage support requires reuse `ublksrv_io_cmd->addr` for 3171 * returning write_append_lba, which is only allowed in case of 3172 * user copy or zero copy 3173 */ 3174 if (ublk_dev_is_zoned(ub) && 3175 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags & 3176 (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) { 3177 ret = -EINVAL; 3178 goto out_free_dev_number; 3179 } 3180 3181 ub->dev_info.nr_hw_queues = min_t(unsigned int, 3182 ub->dev_info.nr_hw_queues, nr_cpu_ids); 3183 ublk_align_max_io_size(ub); 3184 3185 ret = ublk_add_tag_set(ub); 3186 if (ret) 3187 goto out_free_dev_number; 3188 3189 ret = ublk_init_queues(ub); 3190 if (ret) 3191 goto out_free_tag_set; 3192 3193 ret = -EFAULT; 3194 if (copy_to_user(argp, &ub->dev_info, sizeof(info))) 3195 goto out_deinit_queues; 3196 3197 /* 3198 * Add the char dev so that ublksrv daemon can be setup. 3199 * ublk_add_chdev() will cleanup everything if it fails. 3200 */ 3201 ret = ublk_add_chdev(ub); 3202 goto out_unlock; 3203 3204 out_deinit_queues: 3205 ublk_deinit_queues(ub); 3206 out_free_tag_set: 3207 blk_mq_free_tag_set(&ub->tag_set); 3208 out_free_dev_number: 3209 ublk_free_dev_number(ub); 3210 out_free_ub: 3211 mutex_destroy(&ub->mutex); 3212 mutex_destroy(&ub->cancel_mutex); 3213 kfree(ub); 3214 out_unlock: 3215 mutex_unlock(&ublk_ctl_mutex); 3216 return ret; 3217 } 3218 3219 static inline bool ublk_idr_freed(int id) 3220 { 3221 void *ptr; 3222 3223 spin_lock(&ublk_idr_lock); 3224 ptr = idr_find(&ublk_index_idr, id); 3225 spin_unlock(&ublk_idr_lock); 3226 3227 return ptr == NULL; 3228 } 3229 3230 static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait) 3231 { 3232 struct ublk_device *ub = *p_ub; 3233 int idx = ub->ub_number; 3234 int ret; 3235 3236 ret = mutex_lock_killable(&ublk_ctl_mutex); 3237 if (ret) 3238 return ret; 3239 3240 if (!test_bit(UB_STATE_DELETED, &ub->state)) { 3241 ublk_remove(ub); 3242 set_bit(UB_STATE_DELETED, &ub->state); 3243 } 3244 3245 /* Mark the reference as consumed */ 3246 *p_ub = NULL; 3247 ublk_put_device(ub); 3248 mutex_unlock(&ublk_ctl_mutex); 3249 3250 /* 3251 * Wait until the idr is removed, then it can be reused after 3252 * DEL_DEV command is returned. 3253 * 3254 * If we returns because of user interrupt, future delete command 3255 * may come: 3256 * 3257 * - the device number isn't freed, this device won't or needn't 3258 * be deleted again, since UB_STATE_DELETED is set, and device 3259 * will be released after the last reference is dropped 3260 * 3261 * - the device number is freed already, we will not find this 3262 * device via ublk_get_device_from_id() 3263 */ 3264 if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx))) 3265 return -EINTR; 3266 return 0; 3267 } 3268 3269 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd) 3270 { 3271 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); 3272 3273 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n", 3274 __func__, cmd->cmd_op, header->dev_id, header->queue_id, 3275 header->data[0], header->addr, header->len); 3276 } 3277 3278 static int ublk_ctrl_stop_dev(struct ublk_device *ub) 3279 { 3280 ublk_stop_dev(ub); 3281 return 0; 3282 } 3283 3284 static int ublk_ctrl_get_dev_info(struct ublk_device *ub, 3285 const struct ublksrv_ctrl_cmd *header) 3286 { 3287 void __user *argp = (void __user *)(unsigned long)header->addr; 3288 3289 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr) 3290 return -EINVAL; 3291 3292 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info))) 3293 return -EFAULT; 3294 3295 return 0; 3296 } 3297 3298 /* TYPE_DEVT is readonly, so fill it up before returning to userspace */ 3299 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub) 3300 { 3301 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt); 3302 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt); 3303 3304 if (ub->ub_disk) { 3305 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk)); 3306 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk)); 3307 } else { 3308 ub->params.devt.disk_major = 0; 3309 ub->params.devt.disk_minor = 0; 3310 } 3311 ub->params.types |= UBLK_PARAM_TYPE_DEVT; 3312 } 3313 3314 static int ublk_ctrl_get_params(struct ublk_device *ub, 3315 const struct ublksrv_ctrl_cmd *header) 3316 { 3317 void __user *argp = (void __user *)(unsigned long)header->addr; 3318 struct ublk_params_header ph; 3319 int ret; 3320 3321 if (header->len <= sizeof(ph) || !header->addr) 3322 return -EINVAL; 3323 3324 if (copy_from_user(&ph, argp, sizeof(ph))) 3325 return -EFAULT; 3326 3327 if (ph.len > header->len || !ph.len) 3328 return -EINVAL; 3329 3330 if (ph.len > sizeof(struct ublk_params)) 3331 ph.len = sizeof(struct ublk_params); 3332 3333 mutex_lock(&ub->mutex); 3334 ublk_ctrl_fill_params_devt(ub); 3335 if (copy_to_user(argp, &ub->params, ph.len)) 3336 ret = -EFAULT; 3337 else 3338 ret = 0; 3339 mutex_unlock(&ub->mutex); 3340 3341 return ret; 3342 } 3343 3344 static int ublk_ctrl_set_params(struct ublk_device *ub, 3345 const struct ublksrv_ctrl_cmd *header) 3346 { 3347 void __user *argp = (void __user *)(unsigned long)header->addr; 3348 struct ublk_params_header ph; 3349 int ret = -EFAULT; 3350 3351 if (header->len <= sizeof(ph) || !header->addr) 3352 return -EINVAL; 3353 3354 if (copy_from_user(&ph, argp, sizeof(ph))) 3355 return -EFAULT; 3356 3357 if (ph.len > header->len || !ph.len || !ph.types) 3358 return -EINVAL; 3359 3360 if (ph.len > sizeof(struct ublk_params)) 3361 ph.len = sizeof(struct ublk_params); 3362 3363 mutex_lock(&ub->mutex); 3364 if (test_bit(UB_STATE_USED, &ub->state)) { 3365 /* 3366 * Parameters can only be changed when device hasn't 3367 * been started yet 3368 */ 3369 ret = -EACCES; 3370 } else if (copy_from_user(&ub->params, argp, ph.len)) { 3371 ret = -EFAULT; 3372 } else { 3373 /* clear all we don't support yet */ 3374 ub->params.types &= UBLK_PARAM_TYPE_ALL; 3375 ret = ublk_validate_params(ub); 3376 if (ret) 3377 ub->params.types = 0; 3378 } 3379 mutex_unlock(&ub->mutex); 3380 3381 return ret; 3382 } 3383 3384 static int ublk_ctrl_start_recovery(struct ublk_device *ub, 3385 const struct ublksrv_ctrl_cmd *header) 3386 { 3387 int ret = -EINVAL; 3388 3389 mutex_lock(&ub->mutex); 3390 if (ublk_nosrv_should_stop_dev(ub)) 3391 goto out_unlock; 3392 /* 3393 * START_RECOVERY is only allowd after: 3394 * 3395 * (1) UB_STATE_OPEN is not set, which means the dying process is exited 3396 * and related io_uring ctx is freed so file struct of /dev/ublkcX is 3397 * released. 3398 * 3399 * and one of the following holds 3400 * 3401 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work: 3402 * (a)has quiesced request queue 3403 * (b)has requeued every inflight rqs whose io_flags is ACTIVE 3404 * (c)has requeued/aborted every inflight rqs whose io_flags is NOT ACTIVE 3405 * (d)has completed/camceled all ioucmds owned by ther dying process 3406 * 3407 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not 3408 * quiesced, but all I/O is being immediately errored 3409 */ 3410 if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) { 3411 ret = -EBUSY; 3412 goto out_unlock; 3413 } 3414 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id); 3415 init_completion(&ub->completion); 3416 ret = 0; 3417 out_unlock: 3418 mutex_unlock(&ub->mutex); 3419 return ret; 3420 } 3421 3422 static int ublk_ctrl_end_recovery(struct ublk_device *ub, 3423 const struct ublksrv_ctrl_cmd *header) 3424 { 3425 int ublksrv_pid = (int)header->data[0]; 3426 int ret = -EINVAL; 3427 3428 pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__, 3429 header->dev_id); 3430 3431 if (wait_for_completion_interruptible(&ub->completion)) 3432 return -EINTR; 3433 3434 pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__, 3435 header->dev_id); 3436 3437 if (ub->ublksrv_tgid != ublksrv_pid) 3438 return -EINVAL; 3439 3440 mutex_lock(&ub->mutex); 3441 if (ublk_nosrv_should_stop_dev(ub)) 3442 goto out_unlock; 3443 3444 if (!ublk_dev_in_recoverable_state(ub)) { 3445 ret = -EBUSY; 3446 goto out_unlock; 3447 } 3448 ub->dev_info.ublksrv_pid = ublksrv_pid; 3449 ub->dev_info.state = UBLK_S_DEV_LIVE; 3450 pr_devel("%s: new ublksrv_pid %d, dev id %d\n", 3451 __func__, ublksrv_pid, header->dev_id); 3452 blk_mq_kick_requeue_list(ub->ub_disk->queue); 3453 ret = 0; 3454 out_unlock: 3455 mutex_unlock(&ub->mutex); 3456 return ret; 3457 } 3458 3459 static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header) 3460 { 3461 void __user *argp = (void __user *)(unsigned long)header->addr; 3462 u64 features = UBLK_F_ALL; 3463 3464 if (header->len != UBLK_FEATURES_LEN || !header->addr) 3465 return -EINVAL; 3466 3467 if (copy_to_user(argp, &features, UBLK_FEATURES_LEN)) 3468 return -EFAULT; 3469 3470 return 0; 3471 } 3472 3473 static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) 3474 { 3475 struct ublk_param_basic *p = &ub->params.basic; 3476 u64 new_size = header->data[0]; 3477 3478 mutex_lock(&ub->mutex); 3479 p->dev_sectors = new_size; 3480 set_capacity_and_notify(ub->ub_disk, p->dev_sectors); 3481 mutex_unlock(&ub->mutex); 3482 } 3483 3484 struct count_busy { 3485 const struct ublk_queue *ubq; 3486 unsigned int nr_busy; 3487 }; 3488 3489 static bool ublk_count_busy_req(struct request *rq, void *data) 3490 { 3491 struct count_busy *idle = data; 3492 3493 if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq) 3494 idle->nr_busy += 1; 3495 return true; 3496 } 3497 3498 /* uring_cmd is guaranteed to be active if the associated request is idle */ 3499 static bool ubq_has_idle_io(const struct ublk_queue *ubq) 3500 { 3501 struct count_busy data = { 3502 .ubq = ubq, 3503 }; 3504 3505 blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data); 3506 return data.nr_busy < ubq->q_depth; 3507 } 3508 3509 /* Wait until each hw queue has at least one idle IO */ 3510 static int ublk_wait_for_idle_io(struct ublk_device *ub, 3511 unsigned int timeout_ms) 3512 { 3513 unsigned int elapsed = 0; 3514 int ret; 3515 3516 while (elapsed < timeout_ms && !signal_pending(current)) { 3517 unsigned int queues_cancelable = 0; 3518 int i; 3519 3520 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) { 3521 struct ublk_queue *ubq = ublk_get_queue(ub, i); 3522 3523 queues_cancelable += !!ubq_has_idle_io(ubq); 3524 } 3525 3526 /* 3527 * Each queue needs at least one active command for 3528 * notifying ublk server 3529 */ 3530 if (queues_cancelable == ub->dev_info.nr_hw_queues) 3531 break; 3532 3533 msleep(UBLK_REQUEUE_DELAY_MS); 3534 elapsed += UBLK_REQUEUE_DELAY_MS; 3535 } 3536 3537 if (signal_pending(current)) 3538 ret = -EINTR; 3539 else if (elapsed >= timeout_ms) 3540 ret = -EBUSY; 3541 else 3542 ret = 0; 3543 3544 return ret; 3545 } 3546 3547 static int ublk_ctrl_quiesce_dev(struct ublk_device *ub, 3548 const struct ublksrv_ctrl_cmd *header) 3549 { 3550 /* zero means wait forever */ 3551 u64 timeout_ms = header->data[0]; 3552 struct gendisk *disk; 3553 int ret = -ENODEV; 3554 3555 if (!(ub->dev_info.flags & UBLK_F_QUIESCE)) 3556 return -EOPNOTSUPP; 3557 3558 mutex_lock(&ub->mutex); 3559 disk = ublk_get_disk(ub); 3560 if (!disk) 3561 goto unlock; 3562 if (ub->dev_info.state == UBLK_S_DEV_DEAD) 3563 goto put_disk; 3564 3565 ret = 0; 3566 /* already in expected state */ 3567 if (ub->dev_info.state != UBLK_S_DEV_LIVE) 3568 goto put_disk; 3569 3570 /* Mark the device as canceling */ 3571 mutex_lock(&ub->cancel_mutex); 3572 blk_mq_quiesce_queue(disk->queue); 3573 ublk_set_canceling(ub, true); 3574 blk_mq_unquiesce_queue(disk->queue); 3575 mutex_unlock(&ub->cancel_mutex); 3576 3577 if (!timeout_ms) 3578 timeout_ms = UINT_MAX; 3579 ret = ublk_wait_for_idle_io(ub, timeout_ms); 3580 3581 put_disk: 3582 ublk_put_disk(disk); 3583 unlock: 3584 mutex_unlock(&ub->mutex); 3585 3586 /* Cancel pending uring_cmd */ 3587 if (!ret) 3588 ublk_cancel_dev(ub); 3589 return ret; 3590 } 3591 3592 /* 3593 * All control commands are sent via /dev/ublk-control, so we have to check 3594 * the destination device's permission 3595 */ 3596 static int ublk_char_dev_permission(struct ublk_device *ub, 3597 const char *dev_path, int mask) 3598 { 3599 int err; 3600 struct path path; 3601 struct kstat stat; 3602 3603 err = kern_path(dev_path, LOOKUP_FOLLOW, &path); 3604 if (err) 3605 return err; 3606 3607 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT); 3608 if (err) 3609 goto exit; 3610 3611 err = -EPERM; 3612 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode)) 3613 goto exit; 3614 3615 err = inode_permission(&nop_mnt_idmap, 3616 d_backing_inode(path.dentry), mask); 3617 exit: 3618 path_put(&path); 3619 return err; 3620 } 3621 3622 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub, 3623 struct io_uring_cmd *cmd) 3624 { 3625 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe); 3626 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV; 3627 void __user *argp = (void __user *)(unsigned long)header->addr; 3628 char *dev_path = NULL; 3629 int ret = 0; 3630 int mask; 3631 3632 if (!unprivileged) { 3633 if (!capable(CAP_SYS_ADMIN)) 3634 return -EPERM; 3635 /* 3636 * The new added command of UBLK_CMD_GET_DEV_INFO2 includes 3637 * char_dev_path in payload too, since userspace may not 3638 * know if the specified device is created as unprivileged 3639 * mode. 3640 */ 3641 if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2) 3642 return 0; 3643 } 3644 3645 /* 3646 * User has to provide the char device path for unprivileged ublk 3647 * 3648 * header->addr always points to the dev path buffer, and 3649 * header->dev_path_len records length of dev path buffer. 3650 */ 3651 if (!header->dev_path_len || header->dev_path_len > PATH_MAX) 3652 return -EINVAL; 3653 3654 if (header->len < header->dev_path_len) 3655 return -EINVAL; 3656 3657 dev_path = memdup_user_nul(argp, header->dev_path_len); 3658 if (IS_ERR(dev_path)) 3659 return PTR_ERR(dev_path); 3660 3661 ret = -EINVAL; 3662 switch (_IOC_NR(cmd->cmd_op)) { 3663 case UBLK_CMD_GET_DEV_INFO: 3664 case UBLK_CMD_GET_DEV_INFO2: 3665 case UBLK_CMD_GET_QUEUE_AFFINITY: 3666 case UBLK_CMD_GET_PARAMS: 3667 case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)): 3668 mask = MAY_READ; 3669 break; 3670 case UBLK_CMD_START_DEV: 3671 case UBLK_CMD_STOP_DEV: 3672 case UBLK_CMD_ADD_DEV: 3673 case UBLK_CMD_DEL_DEV: 3674 case UBLK_CMD_SET_PARAMS: 3675 case UBLK_CMD_START_USER_RECOVERY: 3676 case UBLK_CMD_END_USER_RECOVERY: 3677 case UBLK_CMD_UPDATE_SIZE: 3678 case UBLK_CMD_QUIESCE_DEV: 3679 mask = MAY_READ | MAY_WRITE; 3680 break; 3681 default: 3682 goto exit; 3683 } 3684 3685 ret = ublk_char_dev_permission(ub, dev_path, mask); 3686 if (!ret) { 3687 header->len -= header->dev_path_len; 3688 header->addr += header->dev_path_len; 3689 } 3690 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n", 3691 __func__, ub->ub_number, cmd->cmd_op, 3692 ub->dev_info.owner_uid, ub->dev_info.owner_gid, 3693 dev_path, ret); 3694 exit: 3695 kfree(dev_path); 3696 return ret; 3697 } 3698 3699 static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op) 3700 { 3701 switch (_IOC_NR(cmd_op)) { 3702 case UBLK_CMD_GET_QUEUE_AFFINITY: 3703 case UBLK_CMD_GET_DEV_INFO: 3704 case UBLK_CMD_GET_DEV_INFO2: 3705 case _IOC_NR(UBLK_U_CMD_GET_FEATURES): 3706 return false; 3707 default: 3708 return true; 3709 } 3710 } 3711 3712 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, 3713 unsigned int issue_flags) 3714 { 3715 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe); 3716 struct ublk_device *ub = NULL; 3717 u32 cmd_op = cmd->cmd_op; 3718 int ret = -EINVAL; 3719 3720 if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) && 3721 issue_flags & IO_URING_F_NONBLOCK) 3722 return -EAGAIN; 3723 3724 ublk_ctrl_cmd_dump(cmd); 3725 3726 if (!(issue_flags & IO_URING_F_SQE128)) 3727 goto out; 3728 3729 ret = ublk_check_cmd_op(cmd_op); 3730 if (ret) 3731 goto out; 3732 3733 if (cmd_op == UBLK_U_CMD_GET_FEATURES) { 3734 ret = ublk_ctrl_get_features(header); 3735 goto out; 3736 } 3737 3738 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) { 3739 ret = -ENODEV; 3740 ub = ublk_get_device_from_id(header->dev_id); 3741 if (!ub) 3742 goto out; 3743 3744 ret = ublk_ctrl_uring_cmd_permission(ub, cmd); 3745 if (ret) 3746 goto put_dev; 3747 } 3748 3749 switch (_IOC_NR(cmd_op)) { 3750 case UBLK_CMD_START_DEV: 3751 ret = ublk_ctrl_start_dev(ub, header); 3752 break; 3753 case UBLK_CMD_STOP_DEV: 3754 ret = ublk_ctrl_stop_dev(ub); 3755 break; 3756 case UBLK_CMD_GET_DEV_INFO: 3757 case UBLK_CMD_GET_DEV_INFO2: 3758 ret = ublk_ctrl_get_dev_info(ub, header); 3759 break; 3760 case UBLK_CMD_ADD_DEV: 3761 ret = ublk_ctrl_add_dev(header); 3762 break; 3763 case UBLK_CMD_DEL_DEV: 3764 ret = ublk_ctrl_del_dev(&ub, true); 3765 break; 3766 case UBLK_CMD_DEL_DEV_ASYNC: 3767 ret = ublk_ctrl_del_dev(&ub, false); 3768 break; 3769 case UBLK_CMD_GET_QUEUE_AFFINITY: 3770 ret = ublk_ctrl_get_queue_affinity(ub, header); 3771 break; 3772 case UBLK_CMD_GET_PARAMS: 3773 ret = ublk_ctrl_get_params(ub, header); 3774 break; 3775 case UBLK_CMD_SET_PARAMS: 3776 ret = ublk_ctrl_set_params(ub, header); 3777 break; 3778 case UBLK_CMD_START_USER_RECOVERY: 3779 ret = ublk_ctrl_start_recovery(ub, header); 3780 break; 3781 case UBLK_CMD_END_USER_RECOVERY: 3782 ret = ublk_ctrl_end_recovery(ub, header); 3783 break; 3784 case UBLK_CMD_UPDATE_SIZE: 3785 ublk_ctrl_set_size(ub, header); 3786 ret = 0; 3787 break; 3788 case UBLK_CMD_QUIESCE_DEV: 3789 ret = ublk_ctrl_quiesce_dev(ub, header); 3790 break; 3791 default: 3792 ret = -EOPNOTSUPP; 3793 break; 3794 } 3795 3796 put_dev: 3797 if (ub) 3798 ublk_put_device(ub); 3799 out: 3800 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n", 3801 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id); 3802 return ret; 3803 } 3804 3805 static const struct file_operations ublk_ctl_fops = { 3806 .open = nonseekable_open, 3807 .uring_cmd = ublk_ctrl_uring_cmd, 3808 .owner = THIS_MODULE, 3809 .llseek = noop_llseek, 3810 }; 3811 3812 static struct miscdevice ublk_misc = { 3813 .minor = MISC_DYNAMIC_MINOR, 3814 .name = "ublk-control", 3815 .fops = &ublk_ctl_fops, 3816 }; 3817 3818 static int __init ublk_init(void) 3819 { 3820 int ret; 3821 3822 BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET + 3823 UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET); 3824 BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8); 3825 3826 init_waitqueue_head(&ublk_idr_wq); 3827 3828 ret = misc_register(&ublk_misc); 3829 if (ret) 3830 return ret; 3831 3832 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char"); 3833 if (ret) 3834 goto unregister_mis; 3835 3836 ret = class_register(&ublk_chr_class); 3837 if (ret) 3838 goto free_chrdev_region; 3839 3840 return 0; 3841 3842 free_chrdev_region: 3843 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS); 3844 unregister_mis: 3845 misc_deregister(&ublk_misc); 3846 return ret; 3847 } 3848 3849 static void __exit ublk_exit(void) 3850 { 3851 struct ublk_device *ub; 3852 int id; 3853 3854 idr_for_each_entry(&ublk_index_idr, ub, id) 3855 ublk_remove(ub); 3856 3857 class_unregister(&ublk_chr_class); 3858 misc_deregister(&ublk_misc); 3859 3860 idr_destroy(&ublk_index_idr); 3861 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS); 3862 } 3863 3864 module_init(ublk_init); 3865 module_exit(ublk_exit); 3866 3867 static int ublk_set_max_unprivileged_ublks(const char *buf, 3868 const struct kernel_param *kp) 3869 { 3870 return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS); 3871 } 3872 3873 static int ublk_get_max_unprivileged_ublks(char *buf, 3874 const struct kernel_param *kp) 3875 { 3876 return sysfs_emit(buf, "%u\n", unprivileged_ublks_max); 3877 } 3878 3879 static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = { 3880 .set = ublk_set_max_unprivileged_ublks, 3881 .get = ublk_get_max_unprivileged_ublks, 3882 }; 3883 3884 module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops, 3885 &unprivileged_ublks_max, 0644); 3886 MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add(default: 64)"); 3887 3888 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>"); 3889 MODULE_DESCRIPTION("Userspace block device"); 3890 MODULE_LICENSE("GPL"); 3891