1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Userspace block device - block device whose IO is handled from userspace
4 *
5 * Makes full use of io_uring passthrough commands to communicate with the
6 * ublk userspace daemon (ublksrvd) for handling basic IO requests.
7 *
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
9 *
10 * (part of code stolen from loop.c)
11 */
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring/cmd.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
42 #include <linux/mm.h>
43 #include <asm/page.h>
44 #include <linux/task_work.h>
45 #include <linux/namei.h>
46 #include <linux/kref.h>
47 #include <uapi/linux/ublk_cmd.h>
48
49 #define UBLK_MINORS (1U << MINORBITS)
50
51 /* private ioctl command mirror */
52 #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
53
54 #define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
55 #define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
56
57 /* All UBLK_F_* have to be included into UBLK_F_ALL */
58 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
59 | UBLK_F_URING_CMD_COMP_IN_TASK \
60 | UBLK_F_NEED_GET_DATA \
61 | UBLK_F_USER_RECOVERY \
62 | UBLK_F_USER_RECOVERY_REISSUE \
63 | UBLK_F_UNPRIVILEGED_DEV \
64 | UBLK_F_CMD_IOCTL_ENCODE \
65 | UBLK_F_USER_COPY \
66 | UBLK_F_ZONED \
67 | UBLK_F_USER_RECOVERY_FAIL_IO)
68
69 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
70 | UBLK_F_USER_RECOVERY_REISSUE \
71 | UBLK_F_USER_RECOVERY_FAIL_IO)
72
73 /* All UBLK_PARAM_TYPE_* should be included here */
74 #define UBLK_PARAM_TYPE_ALL \
75 (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
76 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
77 UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
78
79 struct ublk_rq_data {
80 struct kref ref;
81 };
82
83 struct ublk_uring_cmd_pdu {
84 /*
85 * Temporarily store requests from the same batch for queuing them to
86 * the daemon context.
87 *
88 * They could have been stored in the request payload, but we want to
89 * avoid the extra pre-allocation, and the uring_cmd payload is always
90 * free for us to use.
91 */
92 union {
93 struct request *req;
94 struct request *req_list;
95 };
96
97 /*
98 * The following two fields are valid for this cmd's whole lifetime, and
99 * are set up in the ublk uring_cmd handler
100 */
101 struct ublk_queue *ubq;
102 u16 tag;
103 };
104
105 /*
106 * io command is active: the sqe cmd has been received, and its cqe isn't done
107 *
108 * If the flag is set, the io command is owned by the ublk driver and is
109 * waiting for an incoming blk-mq request from the ublk block device.
110 *
111 * If the flag is cleared, the io command is being completed and is owned
112 * by the ublk server.
113 */
114 #define UBLK_IO_FLAG_ACTIVE 0x01
115
116 /*
117 * IO command is completed via cqe, is being handled by ublksrv, and
118 * has not been committed yet
119 *
120 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
121 * for cross verification
122 */
123 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
124
125 /*
126 * IO command is aborted, so this flag is set in case of
127 * !UBLK_IO_FLAG_ACTIVE.
128 *
129 * After this flag is observed, any pending or new incoming request
130 * associated with this io command will be failed immediately
131 */
132 #define UBLK_IO_FLAG_ABORTED 0x04
133
134 /*
135 * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command needs to get the
136 * data buffer address from ublksrv.
137 *
138 * Then, bio data could be copied into this data buffer for a WRITE request
139 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
140 */
141 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
142
143 /* atomic RW with ubq->cancel_lock */
144 #define UBLK_IO_FLAG_CANCELED 0x80000000
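
/*
 * Rough per-tag lifecycle implied by the flags above (an informal sketch,
 * not an exhaustive state machine):
 *
 *	UBLK_IO_FETCH_REQ received      -> UBLK_IO_FLAG_ACTIVE set
 *					   (slot owned by the ublk driver)
 *	blk-mq request dispatched       -> cqe posted, ACTIVE cleared,
 *					   UBLK_IO_FLAG_OWNED_BY_SRV set
 *	UBLK_IO_COMMIT_AND_FETCH_REQ    -> OWNED_BY_SRV cleared, request ended,
 *					   ACTIVE set again for the next request
 */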
145
146 struct ublk_io {
147 /* userspace buffer address from io cmd */
148 __u64 addr;
149 unsigned int flags;
150 int res;
151
152 struct io_uring_cmd *cmd;
153 };
154
155 struct ublk_queue {
156 int q_id;
157 int q_depth;
158
159 unsigned long flags;
160 struct task_struct *ubq_daemon;
161 struct ublksrv_io_desc *io_cmd_buf;
162
163 bool force_abort;
164 bool timeout;
165 bool canceling;
166 bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
167 unsigned short nr_io_ready; /* how many ios setup */
168 spinlock_t cancel_lock;
169 struct ublk_device *dev;
170 struct ublk_io ios[];
171 };
172
173 struct ublk_device {
174 struct gendisk *ub_disk;
175
176 char *__queues;
177
178 unsigned int queue_size;
179 struct ublksrv_ctrl_dev_info dev_info;
180
181 struct blk_mq_tag_set tag_set;
182
183 struct cdev cdev;
184 struct device cdev_dev;
185
186 #define UB_STATE_OPEN 0
187 #define UB_STATE_USED 1
188 #define UB_STATE_DELETED 2
189 unsigned long state;
190 int ub_number;
191
192 struct mutex mutex;
193
194 spinlock_t lock;
195 struct mm_struct *mm;
196
197 struct ublk_params params;
198
199 struct completion completion;
200 unsigned int nr_queues_ready;
201 unsigned int nr_privileged_daemon;
202
203 struct work_struct nosrv_work;
204 };
205
206 /* header of ublk_params */
207 struct ublk_params_header {
208 __u32 len;
209 __u32 types;
210 };
211
212 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
213
214 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
215 struct ublk_queue *ubq, int tag, size_t offset);
216 static inline unsigned int ublk_req_build_flags(struct request *req);
217 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
218 int tag);
219 static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
220 {
221 return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
222 }
223
224 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
225 {
226 return ub->dev_info.flags & UBLK_F_ZONED;
227 }
228
229 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
230 {
231 return ubq->flags & UBLK_F_ZONED;
232 }
233
234 #ifdef CONFIG_BLK_DEV_ZONED
235
236 struct ublk_zoned_report_desc {
237 __u64 sector;
238 __u32 operation;
239 __u32 nr_zones;
240 };
241
242 static DEFINE_XARRAY(ublk_zoned_report_descs);
243
244 static int ublk_zoned_insert_report_desc(const struct request *req,
245 struct ublk_zoned_report_desc *desc)
246 {
247 return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
248 desc, GFP_KERNEL);
249 }
250
251 static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
252 const struct request *req)
253 {
254 return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
255 }
256
257 static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
258 const struct request *req)
259 {
260 return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
261 }
262
263 static int ublk_get_nr_zones(const struct ublk_device *ub)
264 {
265 const struct ublk_param_basic *p = &ub->params.basic;
266
267 /* Zone size is a power of 2 */
268 return p->dev_sectors >> ilog2(p->chunk_sectors);
269 }
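
/*
 * Worked example (illustrative numbers only): with dev_sectors = 2097152
 * (1 GiB in 512-byte sectors) and chunk_sectors = 524288 (256 MiB zones),
 * ublk_get_nr_zones() returns 2097152 >> 19 = 4 zones.
 */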
270
271 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
272 {
273 return blk_revalidate_disk_zones(ub->ub_disk);
274 }
275
276 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
277 {
278 const struct ublk_param_zoned *p = &ub->params.zoned;
279 int nr_zones;
280
281 if (!ublk_dev_is_zoned(ub))
282 return -EINVAL;
283
284 if (!p->max_zone_append_sectors)
285 return -EINVAL;
286
287 nr_zones = ublk_get_nr_zones(ub);
288
289 if (p->max_active_zones > nr_zones)
290 return -EINVAL;
291
292 if (p->max_open_zones > nr_zones)
293 return -EINVAL;
294
295 return 0;
296 }
297
298 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
299 {
300 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
301 }
302
303 /* Based on virtblk_alloc_report_buffer */
304 static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
305 unsigned int nr_zones, size_t *buflen)
306 {
307 struct request_queue *q = ublk->ub_disk->queue;
308 size_t bufsize;
309 void *buf;
310
311 nr_zones = min_t(unsigned int, nr_zones,
312 ublk->ub_disk->nr_zones);
313
314 bufsize = nr_zones * sizeof(struct blk_zone);
315 bufsize =
316 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
317
318 while (bufsize >= sizeof(struct blk_zone)) {
319 buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
320 if (buf) {
321 *buflen = bufsize;
322 return buf;
323 }
324 bufsize >>= 1;
325 }
326
327 *buflen = 0;
328 return NULL;
329 }
330
331 static int ublk_report_zones(struct gendisk *disk, sector_t sector,
332 unsigned int nr_zones, report_zones_cb cb, void *data)
333 {
334 struct ublk_device *ub = disk->private_data;
335 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
336 unsigned int first_zone = sector >> ilog2(zone_size_sectors);
337 unsigned int done_zones = 0;
338 unsigned int max_zones_per_request;
339 int ret;
340 struct blk_zone *buffer;
341 size_t buffer_length;
342
343 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
344 nr_zones);
345
346 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
347 if (!buffer)
348 return -ENOMEM;
349
350 max_zones_per_request = buffer_length / sizeof(struct blk_zone);
351
352 while (done_zones < nr_zones) {
353 unsigned int remaining_zones = nr_zones - done_zones;
354 unsigned int zones_in_request =
355 min_t(unsigned int, remaining_zones, max_zones_per_request);
356 struct request *req;
357 struct ublk_zoned_report_desc desc;
358 blk_status_t status;
359
360 memset(buffer, 0, buffer_length);
361
362 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
363 if (IS_ERR(req)) {
364 ret = PTR_ERR(req);
365 goto out;
366 }
367
368 desc.operation = UBLK_IO_OP_REPORT_ZONES;
369 desc.sector = sector;
370 desc.nr_zones = zones_in_request;
371 ret = ublk_zoned_insert_report_desc(req, &desc);
372 if (ret)
373 goto free_req;
374
375 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
376 GFP_KERNEL);
377 if (ret)
378 goto erase_desc;
379
380 status = blk_execute_rq(req, 0);
381 ret = blk_status_to_errno(status);
382 erase_desc:
383 ublk_zoned_erase_report_desc(req);
384 free_req:
385 blk_mq_free_request(req);
386 if (ret)
387 goto out;
388
389 for (unsigned int i = 0; i < zones_in_request; i++) {
390 struct blk_zone *zone = buffer + i;
391
392 /* A zero length zone means no more zones in this response */
393 if (!zone->len)
394 break;
395
396 ret = cb(zone, i, data);
397 if (ret)
398 goto out;
399
400 done_zones++;
401 sector += zone_size_sectors;
402
403 }
404 }
405
406 ret = done_zones;
407
408 out:
409 kvfree(buffer);
410 return ret;
411 }
412
413 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
414 struct request *req)
415 {
416 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
417 struct ublk_io *io = &ubq->ios[req->tag];
418 struct ublk_zoned_report_desc *desc;
419 u32 ublk_op;
420
421 switch (req_op(req)) {
422 case REQ_OP_ZONE_OPEN:
423 ublk_op = UBLK_IO_OP_ZONE_OPEN;
424 break;
425 case REQ_OP_ZONE_CLOSE:
426 ublk_op = UBLK_IO_OP_ZONE_CLOSE;
427 break;
428 case REQ_OP_ZONE_FINISH:
429 ublk_op = UBLK_IO_OP_ZONE_FINISH;
430 break;
431 case REQ_OP_ZONE_RESET:
432 ublk_op = UBLK_IO_OP_ZONE_RESET;
433 break;
434 case REQ_OP_ZONE_APPEND:
435 ublk_op = UBLK_IO_OP_ZONE_APPEND;
436 break;
437 case REQ_OP_ZONE_RESET_ALL:
438 ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
439 break;
440 case REQ_OP_DRV_IN:
441 desc = ublk_zoned_get_report_desc(req);
442 if (!desc)
443 return BLK_STS_IOERR;
444 ublk_op = desc->operation;
445 switch (ublk_op) {
446 case UBLK_IO_OP_REPORT_ZONES:
447 iod->op_flags = ublk_op | ublk_req_build_flags(req);
448 iod->nr_zones = desc->nr_zones;
449 iod->start_sector = desc->sector;
450 return BLK_STS_OK;
451 default:
452 return BLK_STS_IOERR;
453 }
454 case REQ_OP_DRV_OUT:
455 /* We do not support drv_out */
456 return BLK_STS_NOTSUPP;
457 default:
458 return BLK_STS_IOERR;
459 }
460
461 iod->op_flags = ublk_op | ublk_req_build_flags(req);
462 iod->nr_sectors = blk_rq_sectors(req);
463 iod->start_sector = blk_rq_pos(req);
464 iod->addr = io->addr;
465
466 return BLK_STS_OK;
467 }
468
469 #else
470
471 #define ublk_report_zones (NULL)
472
473 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
474 {
475 return -EOPNOTSUPP;
476 }
477
478 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
479 {
480 }
481
482 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
483 {
484 return 0;
485 }
486
487 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
488 struct request *req)
489 {
490 return BLK_STS_NOTSUPP;
491 }
492
493 #endif
494
495 static inline void __ublk_complete_rq(struct request *req);
496 static void ublk_complete_rq(struct kref *ref);
497
498 static dev_t ublk_chr_devt;
499 static const struct class ublk_chr_class = {
500 .name = "ublk-char",
501 };
502
503 static DEFINE_IDR(ublk_index_idr);
504 static DEFINE_SPINLOCK(ublk_idr_lock);
505 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
506
507 static DEFINE_MUTEX(ublk_ctl_mutex);
508
509
510 #define UBLK_MAX_UBLKS UBLK_MINORS
511
512 /*
513 * Max number of unprivileged ublk devices allowed to be added
514 *
515 * This can be extended to a per-user limit in the future, or even be
516 * controlled by cgroup.
517 */
518 static unsigned int unprivileged_ublks_max = 64;
519 static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
520
521 static struct miscdevice ublk_misc;
522
523 static inline unsigned ublk_pos_to_hwq(loff_t pos)
524 {
525 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
526 UBLK_QID_BITS_MASK;
527 }
528
529 static inline unsigned ublk_pos_to_buf_off(loff_t pos)
530 {
531 return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
532 }
533
534 static inline unsigned ublk_pos_to_tag(loff_t pos)
535 {
536 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
537 UBLK_TAG_BITS_MASK;
538 }
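
/*
 * For reference, a minimal sketch (server-side assumption, not part of this
 * driver) of how a ublk server builds the pread()/pwrite() position that the
 * three helpers above decode for user copy:
 *
 *	loff_t pos = UBLKSRV_IO_BUF_OFFSET +
 *		((__u64)q_id << UBLK_QID_OFF) +
 *		((__u64)tag << UBLK_TAG_OFF) +
 *		byte_offset_in_io_buffer;
 *
 * ublk_pos_to_hwq(), ublk_pos_to_tag() and ublk_pos_to_buf_off() then
 * recover q_id, tag and the byte offset from such a position.
 */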
539
540 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
541 {
542 const struct ublk_param_basic *p = &ub->params.basic;
543
544 if (p->attrs & UBLK_ATTR_READ_ONLY)
545 set_disk_ro(ub->ub_disk, true);
546
547 set_capacity(ub->ub_disk, p->dev_sectors);
548 }
549
550 static int ublk_validate_params(const struct ublk_device *ub)
551 {
552 /* basic param is the only one which must be set */
553 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
554 const struct ublk_param_basic *p = &ub->params.basic;
555
556 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
557 return -EINVAL;
558
559 if (p->logical_bs_shift > p->physical_bs_shift)
560 return -EINVAL;
561
562 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
563 return -EINVAL;
564
565 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
566 return -EINVAL;
567 } else
568 return -EINVAL;
569
570 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
571 const struct ublk_param_discard *p = &ub->params.discard;
572
573 /* So far, only support single segment discard */
574 if (p->max_discard_sectors && p->max_discard_segments != 1)
575 return -EINVAL;
576
577 if (!p->discard_granularity)
578 return -EINVAL;
579 }
580
581 /* dev_t is read-only */
582 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
583 return -EINVAL;
584
585 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
586 return ublk_dev_param_zoned_validate(ub);
587 else if (ublk_dev_is_zoned(ub))
588 return -EINVAL;
589
590 if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
591 const struct ublk_param_dma_align *p = &ub->params.dma;
592
593 if (p->alignment >= PAGE_SIZE)
594 return -EINVAL;
595
596 if (!is_power_of_2(p->alignment + 1))
597 return -EINVAL;
598 }
599
600 if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
601 const struct ublk_param_segment *p = &ub->params.seg;
602
603 if (!is_power_of_2(p->seg_boundary_mask + 1))
604 return -EINVAL;
605
606 if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
607 return -EINVAL;
608 if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
609 return -EINVAL;
610 }
611
612 return 0;
613 }
614
615 static void ublk_apply_params(struct ublk_device *ub)
616 {
617 ublk_dev_param_basic_apply(ub);
618
619 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
620 ublk_dev_param_zoned_apply(ub);
621 }
622
623 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
624 {
625 return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
626 }
627
628 static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
629 {
630 return !ublk_support_user_copy(ubq);
631 }
632
633 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
634 {
635 /*
636 * read()/write() is involved in user copy, so request reference
637 * has to be grabbed
638 */
639 return ublk_support_user_copy(ubq);
640 }
641
642 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
643 struct request *req)
644 {
645 if (ublk_need_req_ref(ubq)) {
646 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
647
648 kref_init(&data->ref);
649 }
650 }
651
652 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
653 struct request *req)
654 {
655 if (ublk_need_req_ref(ubq)) {
656 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
657
658 return kref_get_unless_zero(&data->ref);
659 }
660
661 return true;
662 }
663
664 static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
665 struct request *req)
666 {
667 if (ublk_need_req_ref(ubq)) {
668 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
669
670 kref_put(&data->ref, ublk_complete_rq);
671 } else {
672 __ublk_complete_rq(req);
673 }
674 }
675
676 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
677 {
678 return ubq->flags & UBLK_F_NEED_GET_DATA;
679 }
680
681 /* Called in slow path only, keep it noinline for tracing purposes */
682 static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
683 {
684 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
685 return ub;
686 return NULL;
687 }
688
689 /* Called in slow path only, keep it noinline for tracing purposes */
690 static noinline void ublk_put_device(struct ublk_device *ub)
691 {
692 put_device(&ub->cdev_dev);
693 }
694
695 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
696 int qid)
697 {
698 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
699 }
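
/*
 * Note: ub->__queues is a single flat allocation carved into per-queue slots
 * of ub->queue_size bytes; each slot holds one struct ublk_queue followed by
 * its flexible ios[] array, which is why queue q_id lives at
 * __queues + q_id * queue_size above.
 */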
700
701 static inline bool ublk_rq_has_data(const struct request *rq)
702 {
703 return bio_has_data(rq->bio);
704 }
705
706 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
707 int tag)
708 {
709 return &ubq->io_cmd_buf[tag];
710 }
711
712 static inline struct ublksrv_io_desc *
713 ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
714 {
715 return ublk_get_queue(ub, q_id)->io_cmd_buf;
716 }
717
718 static inline int __ublk_queue_cmd_buf_size(int depth)
719 {
720 return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
721 }
722
723 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
724 {
725 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
726
727 return __ublk_queue_cmd_buf_size(ubq->q_depth);
728 }
729
730 static int ublk_max_cmd_buf_size(void)
731 {
732 return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
733 }
734
735 /*
736 * Should I/O that was outstanding to the ublk server when it exited be
737 * reissued? If not, outstanding I/O will get errors.
738 */
739 static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
740 {
741 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
742 (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
743 }
744
745 /*
746 * Should I/O issued while there is no ublk server be queued? If not, I/O
747 * issued while there is no ublk server will get errors.
748 */
749 static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
750 {
751 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
752 !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
753 }
754
755 /*
756 * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
757 * of the device flags for smaller cache footprint - better for fast
758 * paths.
759 */
760 static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
761 {
762 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
763 !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
764 }
765
766 /*
767 * Should ublk devices be stopped (i.e. no recovery possible) when the
768 * ublk server exits? If not, devices can be used again by a future
769 * incarnation of a ublk server via the start_recovery/end_recovery
770 * commands.
771 */
772 static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
773 {
774 return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
775 }
776
777 static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
778 {
779 return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
780 ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
781 }
782
783 static void ublk_free_disk(struct gendisk *disk)
784 {
785 struct ublk_device *ub = disk->private_data;
786
787 clear_bit(UB_STATE_USED, &ub->state);
788 ublk_put_device(ub);
789 }
790
791 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
792 unsigned int *owner_gid)
793 {
794 kuid_t uid;
795 kgid_t gid;
796
797 current_uid_gid(&uid, &gid);
798
799 *owner_uid = from_kuid(&init_user_ns, uid);
800 *owner_gid = from_kgid(&init_user_ns, gid);
801 }
802
803 static int ublk_open(struct gendisk *disk, blk_mode_t mode)
804 {
805 struct ublk_device *ub = disk->private_data;
806
807 if (capable(CAP_SYS_ADMIN))
808 return 0;
809
810 /*
811 * If this is an unprivileged device, only the owner can open the
812 * disk. Otherwise it could be a trap set by a malicious user who
813 * deliberately grants this disk's privileges to other users.
814 *
815 * This policy is also reasonable given that anyone can create an
816 * unprivileged device, with no need for anyone else's grant.
818 */
819 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
820 unsigned int curr_uid, curr_gid;
821
822 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
823
824 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
825 ub->dev_info.owner_gid)
826 return -EPERM;
827 }
828
829 return 0;
830 }
831
832 static const struct block_device_operations ub_fops = {
833 .owner = THIS_MODULE,
834 .open = ublk_open,
835 .free_disk = ublk_free_disk,
836 .report_zones = ublk_report_zones,
837 };
838
839 #define UBLK_MAX_PIN_PAGES 32
840
841 struct ublk_io_iter {
842 struct page *pages[UBLK_MAX_PIN_PAGES];
843 struct bio *bio;
844 struct bvec_iter iter;
845 };
846
847 /* copy 'total' bytes between the pinned pages and the bio vectors */
848 static void ublk_copy_io_pages(struct ublk_io_iter *data,
849 size_t total, size_t pg_off, int dir)
850 {
851 unsigned done = 0;
852 unsigned pg_idx = 0;
853
854 while (done < total) {
855 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
856 unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
857 (unsigned)(PAGE_SIZE - pg_off));
858 void *bv_buf = bvec_kmap_local(&bv);
859 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
860
861 if (dir == ITER_DEST)
862 memcpy(pg_buf + pg_off, bv_buf, bytes);
863 else
864 memcpy(bv_buf, pg_buf + pg_off, bytes);
865
866 kunmap_local(pg_buf);
867 kunmap_local(bv_buf);
868
869 /* advance page array */
870 pg_off += bytes;
871 if (pg_off == PAGE_SIZE) {
872 pg_idx += 1;
873 pg_off = 0;
874 }
875
876 done += bytes;
877
878 /* advance bio */
879 bio_advance_iter_single(data->bio, &data->iter, bytes);
880 if (!data->iter.bi_size) {
881 data->bio = data->bio->bi_next;
882 if (data->bio == NULL)
883 break;
884 data->iter = data->bio->bi_iter;
885 }
886 }
887 }
888
889 static bool ublk_advance_io_iter(const struct request *req,
890 struct ublk_io_iter *iter, unsigned int offset)
891 {
892 struct bio *bio = req->bio;
893
894 for_each_bio(bio) {
895 if (bio->bi_iter.bi_size > offset) {
896 iter->bio = bio;
897 iter->iter = bio->bi_iter;
898 bio_advance_iter(iter->bio, &iter->iter, offset);
899 return true;
900 }
901 offset -= bio->bi_iter.bi_size;
902 }
903 return false;
904 }
905
906 /*
907 * Copy data between the request pages and the iov_iter; 'offset' is the
908 * linear byte offset within the request at which to start.
909 */
910 static size_t ublk_copy_user_pages(const struct request *req,
911 unsigned offset, struct iov_iter *uiter, int dir)
912 {
913 struct ublk_io_iter iter;
914 size_t done = 0;
915
916 if (!ublk_advance_io_iter(req, &iter, offset))
917 return 0;
918
919 while (iov_iter_count(uiter) && iter.bio) {
920 unsigned nr_pages;
921 ssize_t len;
922 size_t off;
923 int i;
924
925 len = iov_iter_get_pages2(uiter, iter.pages,
926 iov_iter_count(uiter),
927 UBLK_MAX_PIN_PAGES, &off);
928 if (len <= 0)
929 return done;
930
931 ublk_copy_io_pages(&iter, len, off, dir);
932 nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
933 for (i = 0; i < nr_pages; i++) {
934 if (dir == ITER_DEST)
935 set_page_dirty(iter.pages[i]);
936 put_page(iter.pages[i]);
937 }
938 done += len;
939 }
940
941 return done;
942 }
943
944 static inline bool ublk_need_map_req(const struct request *req)
945 {
946 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
947 }
948
949 static inline bool ublk_need_unmap_req(const struct request *req)
950 {
951 return ublk_rq_has_data(req) &&
952 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
953 }
954
955 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
956 struct ublk_io *io)
957 {
958 const unsigned int rq_bytes = blk_rq_bytes(req);
959
960 if (!ublk_need_map_io(ubq))
961 return rq_bytes;
962
963 /*
964 * No zero copy: we delay copying WRITE request data into the ublksrv
965 * context, and the big benefit is that pinning pages in the current
966 * context is pretty fast, see ublk_copy_user_pages().
967 */
968 if (ublk_need_map_req(req)) {
969 struct iov_iter iter;
970 const int dir = ITER_DEST;
971
972 import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
973 return ublk_copy_user_pages(req, 0, &iter, dir);
974 }
975 return rq_bytes;
976 }
977
978 static int ublk_unmap_io(const struct ublk_queue *ubq,
979 const struct request *req,
980 struct ublk_io *io)
981 {
982 const unsigned int rq_bytes = blk_rq_bytes(req);
983
984 if (!ublk_need_map_io(ubq))
985 return rq_bytes;
986
987 if (ublk_need_unmap_req(req)) {
988 struct iov_iter iter;
989 const int dir = ITER_SOURCE;
990
991 WARN_ON_ONCE(io->res > rq_bytes);
992
993 import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
994 return ublk_copy_user_pages(req, 0, &iter, dir);
995 }
996 return rq_bytes;
997 }
998
999 static inline unsigned int ublk_req_build_flags(struct request *req)
1000 {
1001 unsigned flags = 0;
1002
1003 if (req->cmd_flags & REQ_FAILFAST_DEV)
1004 flags |= UBLK_IO_F_FAILFAST_DEV;
1005
1006 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
1007 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
1008
1009 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
1010 flags |= UBLK_IO_F_FAILFAST_DRIVER;
1011
1012 if (req->cmd_flags & REQ_META)
1013 flags |= UBLK_IO_F_META;
1014
1015 if (req->cmd_flags & REQ_FUA)
1016 flags |= UBLK_IO_F_FUA;
1017
1018 if (req->cmd_flags & REQ_NOUNMAP)
1019 flags |= UBLK_IO_F_NOUNMAP;
1020
1021 if (req->cmd_flags & REQ_SWAP)
1022 flags |= UBLK_IO_F_SWAP;
1023
1024 return flags;
1025 }
1026
1027 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
1028 {
1029 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
1030 struct ublk_io *io = &ubq->ios[req->tag];
1031 enum req_op op = req_op(req);
1032 u32 ublk_op;
1033
1034 if (!ublk_queue_is_zoned(ubq) &&
1035 (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
1036 return BLK_STS_IOERR;
1037
1038 switch (req_op(req)) {
1039 case REQ_OP_READ:
1040 ublk_op = UBLK_IO_OP_READ;
1041 break;
1042 case REQ_OP_WRITE:
1043 ublk_op = UBLK_IO_OP_WRITE;
1044 break;
1045 case REQ_OP_FLUSH:
1046 ublk_op = UBLK_IO_OP_FLUSH;
1047 break;
1048 case REQ_OP_DISCARD:
1049 ublk_op = UBLK_IO_OP_DISCARD;
1050 break;
1051 case REQ_OP_WRITE_ZEROES:
1052 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
1053 break;
1054 default:
1055 if (ublk_queue_is_zoned(ubq))
1056 return ublk_setup_iod_zoned(ubq, req);
1057 return BLK_STS_IOERR;
1058 }
1059
1060 /* translate the op code, since in-kernel REQ_OP_* values may change */
1061 iod->op_flags = ublk_op | ublk_req_build_flags(req);
1062 iod->nr_sectors = blk_rq_sectors(req);
1063 iod->start_sector = blk_rq_pos(req);
1064 iod->addr = io->addr;
1065
1066 return BLK_STS_OK;
1067 }
1068
1069 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
1070 struct io_uring_cmd *ioucmd)
1071 {
1072 return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
1073 }
1074
1075 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
1076 {
1077 return ubq->ubq_daemon->flags & PF_EXITING;
1078 }
1079
1080 /* todo: handle partial completion */
1081 static inline void __ublk_complete_rq(struct request *req)
1082 {
1083 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1084 struct ublk_io *io = &ubq->ios[req->tag];
1085 unsigned int unmapped_bytes;
1086 blk_status_t res = BLK_STS_OK;
1087
1088 /* called from ublk_abort_queue() code path */
1089 if (io->flags & UBLK_IO_FLAG_ABORTED) {
1090 res = BLK_STS_IOERR;
1091 goto exit;
1092 }
1093
1094 /* fail the read IO if nothing was read */
1095 if (!io->res && req_op(req) == REQ_OP_READ)
1096 io->res = -EIO;
1097
1098 if (io->res < 0) {
1099 res = errno_to_blk_status(io->res);
1100 goto exit;
1101 }
1102
1103 /*
1104 * FLUSH, DISCARD or WRITE_ZEROES usually won't return a byte count, so end
1105 * them directly.
1106 *
1107 * None of them needs unmapping.
1108 */
1109 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1110 req_op(req) != REQ_OP_DRV_IN)
1111 goto exit;
1112
1113 /* for READ request, writing data in iod->addr to rq buffers */
1114 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1115
1116 /*
1117 * This should be nearly impossible since the data was filled in just before.
1118 *
1119 * Truncate io->res for this unlikely case anyway.
1120 */
1121 if (unlikely(unmapped_bytes < io->res))
1122 io->res = unmapped_bytes;
1123
1124 if (blk_update_request(req, BLK_STS_OK, io->res))
1125 blk_mq_requeue_request(req, true);
1126 else
1127 __blk_mq_end_request(req, BLK_STS_OK);
1128
1129 return;
1130 exit:
1131 blk_mq_end_request(req, res);
1132 }
1133
1134 static void ublk_complete_rq(struct kref *ref)
1135 {
1136 struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
1137 ref);
1138 struct request *req = blk_mq_rq_from_pdu(data);
1139
1140 __ublk_complete_rq(req);
1141 }
1142
1143 /*
1144 * Since ublk_rq_task_work_cb always fails requests immediately during
1145 * exiting, __ublk_fail_req() is only called from abort context during
1146 * exiting, so no lock is necessary.
1147 *
1148 * Also, aborting may not have started yet; keep in mind that a failed
1149 * request may be issued by the block layer again.
1150 */
1151 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1152 struct request *req)
1153 {
1154 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1155
1156 if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
1157 blk_mq_requeue_request(req, false);
1158 else
1159 ublk_put_req_ref(ubq, req);
1160 }
1161
1162 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
1163 unsigned issue_flags)
1164 {
1165 /* mark this cmd owned by ublksrv */
1166 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1167
1168 /*
1169 * clear ACTIVE since we are done with this sqe/cmd slot
1170 * We can only accept a new io cmd when it is not active.
1171 */
1172 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1173
1174 /* tell ublksrv one io request is coming */
1175 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
1176 }
1177
1178 #define UBLK_REQUEUE_DELAY_MS 3
1179
1180 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1181 struct request *rq)
1182 {
1183 /* We cannot process this rq so just requeue it. */
1184 if (ublk_nosrv_dev_should_queue_io(ubq->dev))
1185 blk_mq_requeue_request(rq, false);
1186 else
1187 blk_mq_end_request(rq, BLK_STS_IOERR);
1188 }
1189
1190 static void ublk_dispatch_req(struct ublk_queue *ubq,
1191 struct request *req,
1192 unsigned int issue_flags)
1193 {
1194 int tag = req->tag;
1195 struct ublk_io *io = &ubq->ios[tag];
1196 unsigned int mapped_bytes;
1197
1198 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1199 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1200 ublk_get_iod(ubq, req->tag)->addr);
1201
1202 /*
1203 * Task is exiting if either:
1204 *
1205 * (1) current != ubq_daemon.
1206 * io_uring_cmd_complete_in_task() tries to run task_work
1207 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1208 *
1209 * (2) current->flags & PF_EXITING.
1210 */
1211 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1212 __ublk_abort_rq(ubq, req);
1213 return;
1214 }
1215
1216 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1217 /*
1218 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
1219 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1220 * and notify it.
1221 */
1222 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
1223 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1224 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1225 __func__, io->cmd->cmd_op, ubq->q_id,
1226 req->tag, io->flags);
1227 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
1228 return;
1229 }
1230 /*
1231 * We have handled UBLK_IO_NEED_GET_DATA command,
1232 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1233 * do the copy work.
1234 */
1235 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
1236 /* update iod->addr because ublksrv may have passed a new io buffer */
1237 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1238 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1239 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1240 ublk_get_iod(ubq, req->tag)->addr);
1241 }
1242
1243 mapped_bytes = ublk_map_io(ubq, req, io);
1244
1245 /* partially mapped, update io descriptor */
1246 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1247 /*
1248 * Nothing mapped, retry until we succeed.
1249 *
1250 * We may never succeed in mapping any bytes here because
1251 * of OOM. TODO: reserve one buffer with single page pinned
1252 * for providing forward progress guarantee.
1253 */
1254 if (unlikely(!mapped_bytes)) {
1255 blk_mq_requeue_request(req, false);
1256 blk_mq_delay_kick_requeue_list(req->q,
1257 UBLK_REQUEUE_DELAY_MS);
1258 return;
1259 }
1260
1261 ublk_get_iod(ubq, req->tag)->nr_sectors =
1262 mapped_bytes >> 9;
1263 }
1264
1265 ublk_init_req_ref(ubq, req);
1266 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
1267 }
1268
1269 static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
1270 unsigned int issue_flags)
1271 {
1272 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1273 struct ublk_queue *ubq = pdu->ubq;
1274
1275 ublk_dispatch_req(ubq, pdu->req, issue_flags);
1276 }
1277
1278 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1279 {
1280 struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
1281 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1282
1283 pdu->req = rq;
1284 io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
1285 }
1286
1287 static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
1288 unsigned int issue_flags)
1289 {
1290 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1291 struct request *rq = pdu->req_list;
1292 struct ublk_queue *ubq = pdu->ubq;
1293 struct request *next;
1294
1295 do {
1296 next = rq->rq_next;
1297 rq->rq_next = NULL;
1298 ublk_dispatch_req(ubq, rq, issue_flags);
1299 rq = next;
1300 } while (rq);
1301 }
1302
1303 static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
1304 {
1305 struct request *rq = rq_list_peek(l);
1306 struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
1307 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1308
1309 pdu->req_list = rq;
1310 rq_list_init(l);
1311 io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
1312 }
1313
1314 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1315 {
1316 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1317 unsigned int nr_inflight = 0;
1318 int i;
1319
1320 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1321 if (!ubq->timeout) {
1322 send_sig(SIGKILL, ubq->ubq_daemon, 0);
1323 ubq->timeout = true;
1324 }
1325
1326 return BLK_EH_DONE;
1327 }
1328
1329 if (!ubq_daemon_is_dying(ubq))
1330 return BLK_EH_RESET_TIMER;
1331
1332 for (i = 0; i < ubq->q_depth; i++) {
1333 struct ublk_io *io = &ubq->ios[i];
1334
1335 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1336 nr_inflight++;
1337 }
1338
1339 /* cancelable uring_cmd can't help us if all commands are in-flight */
1340 if (nr_inflight == ubq->q_depth) {
1341 struct ublk_device *ub = ubq->dev;
1342
1343 if (ublk_abort_requests(ub, ubq)) {
1344 schedule_work(&ub->nosrv_work);
1345 }
1346 return BLK_EH_DONE;
1347 }
1348
1349 return BLK_EH_RESET_TIMER;
1350 }
1351
1352 static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
1353 {
1354 blk_status_t res;
1355
1356 if (unlikely(ubq->fail_io))
1357 return BLK_STS_TARGET;
1358
1359 /* With recovery feature enabled, force_abort is set in
1360 * ublk_stop_dev() before calling del_gendisk(). We have to
1361 * abort all requeued and new rqs here to let del_gendisk()
1362 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
1363 * here, so as to avoid a UAF on the io_uring ctx.
1364 *
1365 * Note: force_abort is guaranteed to be seen because it is set
1366 * before the request queue is unquiesced.
1367 */
1368 if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
1369 return BLK_STS_IOERR;
1370
1371 if (unlikely(ubq->canceling))
1372 return BLK_STS_IOERR;
1373
1374 /* fill iod to slot in io cmd buffer */
1375 res = ublk_setup_iod(ubq, rq);
1376 if (unlikely(res != BLK_STS_OK))
1377 return BLK_STS_IOERR;
1378
1379 blk_mq_start_request(rq);
1380 return BLK_STS_OK;
1381 }
1382
1383 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1384 const struct blk_mq_queue_data *bd)
1385 {
1386 struct ublk_queue *ubq = hctx->driver_data;
1387 struct request *rq = bd->rq;
1388 blk_status_t res;
1389
1390 res = ublk_prep_req(ubq, rq);
1391 if (res != BLK_STS_OK)
1392 return res;
1393
1394 /*
1395 * ->canceling has to be handled after ->force_abort and ->fail_io
1396 * is dealt with, otherwise this request may not be failed in case
1397 * of recovery, and cause hang when deleting disk
1398 */
1399 if (unlikely(ubq->canceling)) {
1400 __ublk_abort_rq(ubq, rq);
1401 return BLK_STS_OK;
1402 }
1403
1404 ublk_queue_cmd(ubq, rq);
1405 return BLK_STS_OK;
1406 }
1407
1408 static void ublk_queue_rqs(struct rq_list *rqlist)
1409 {
1410 struct rq_list requeue_list = { };
1411 struct rq_list submit_list = { };
1412 struct ublk_queue *ubq = NULL;
1413 struct request *req;
1414
1415 while ((req = rq_list_pop(rqlist))) {
1416 struct ublk_queue *this_q = req->mq_hctx->driver_data;
1417
1418 if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
1419 ublk_queue_cmd_list(ubq, &submit_list);
1420 ubq = this_q;
1421
1422 if (ublk_prep_req(ubq, req) == BLK_STS_OK)
1423 rq_list_add_tail(&submit_list, req);
1424 else
1425 rq_list_add_tail(&requeue_list, req);
1426 }
1427
1428 if (ubq && !rq_list_empty(&submit_list))
1429 ublk_queue_cmd_list(ubq, &submit_list);
1430 *rqlist = requeue_list;
1431 }
1432
1433 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1434 unsigned int hctx_idx)
1435 {
1436 struct ublk_device *ub = driver_data;
1437 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1438
1439 hctx->driver_data = ubq;
1440 return 0;
1441 }
1442
1443 static const struct blk_mq_ops ublk_mq_ops = {
1444 .queue_rq = ublk_queue_rq,
1445 .queue_rqs = ublk_queue_rqs,
1446 .init_hctx = ublk_init_hctx,
1447 .timeout = ublk_timeout,
1448 };
1449
1450 static int ublk_ch_open(struct inode *inode, struct file *filp)
1451 {
1452 struct ublk_device *ub = container_of(inode->i_cdev,
1453 struct ublk_device, cdev);
1454
1455 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1456 return -EBUSY;
1457 filp->private_data = ub;
1458 return 0;
1459 }
1460
1461 static int ublk_ch_release(struct inode *inode, struct file *filp)
1462 {
1463 struct ublk_device *ub = filp->private_data;
1464
1465 clear_bit(UB_STATE_OPEN, &ub->state);
1466 return 0;
1467 }
1468
1469 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1470 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1471 {
1472 struct ublk_device *ub = filp->private_data;
1473 size_t sz = vma->vm_end - vma->vm_start;
1474 unsigned max_sz = ublk_max_cmd_buf_size();
1475 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1476 int q_id, ret = 0;
1477
1478 spin_lock(&ub->lock);
1479 if (!ub->mm)
1480 ub->mm = current->mm;
1481 if (current->mm != ub->mm)
1482 ret = -EINVAL;
1483 spin_unlock(&ub->lock);
1484
1485 if (ret)
1486 return ret;
1487
1488 if (vma->vm_flags & VM_WRITE)
1489 return -EPERM;
1490
1491 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1492 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1493 return -EINVAL;
1494
1495 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1496 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1497 __func__, q_id, current->pid, vma->vm_start,
1498 phys_off, (unsigned long)sz);
1499
1500 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1501 return -EINVAL;
1502
1503 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1504 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1505 }
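
/*
 * For reference, a sketch (server-side assumption, not part of the driver)
 * of the mmap() that ublk_ch_mmap() above expects for queue q_id's
 * descriptor array:
 *
 *	off_t off = UBLKSRV_CMD_BUF_OFFSET + q_id * max_cmd_buf_size;
 *	descs = mmap(NULL, cmd_buf_size, PROT_READ, MAP_SHARED, cdev_fd, off);
 *
 * where max_cmd_buf_size matches ublk_max_cmd_buf_size() and cmd_buf_size
 * matches ublk_queue_cmd_buf_size() for that queue; VM_WRITE mappings are
 * rejected above.
 */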
1506
1507 static void ublk_commit_completion(struct ublk_device *ub,
1508 const struct ublksrv_io_cmd *ub_cmd)
1509 {
1510 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1511 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1512 struct ublk_io *io = &ubq->ios[tag];
1513 struct request *req;
1514
1515 /* now this cmd slot is owned by the ublk driver */
1516 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1517 io->res = ub_cmd->result;
1518
1519 /* find the io request and complete */
1520 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1521 if (WARN_ON_ONCE(unlikely(!req)))
1522 return;
1523
1524 if (req_op(req) == REQ_OP_ZONE_APPEND)
1525 req->__sector = ub_cmd->zone_append_lba;
1526
1527 if (likely(!blk_should_fake_timeout(req->q)))
1528 ublk_put_req_ref(ubq, req);
1529 }
1530
1531 /*
1532 * Called from the ubq_daemon context via the cancel fn, and meanwhile the
1533 * ublk blk-mq queue is quiesced, so we are called exclusively with the
1534 * blk-mq and ubq_daemon contexts, and everything is serialized.
1535 */
1536 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1537 {
1538 int i;
1539
1540 for (i = 0; i < ubq->q_depth; i++) {
1541 struct ublk_io *io = &ubq->ios[i];
1542
1543 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1544 struct request *rq;
1545
1546 /*
1547 * Either we fail the request or ublk_rq_task_work_cb
1548 * will do it
1549 */
1550 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1551 if (rq && blk_mq_request_started(rq)) {
1552 io->flags |= UBLK_IO_FLAG_ABORTED;
1553 __ublk_fail_req(ubq, io, rq);
1554 }
1555 }
1556 }
1557 }
1558
1559 /* Must be called when queue is frozen */
1560 static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
1561 {
1562 bool canceled;
1563
1564 spin_lock(&ubq->cancel_lock);
1565 canceled = ubq->canceling;
1566 if (!canceled)
1567 ubq->canceling = true;
1568 spin_unlock(&ubq->cancel_lock);
1569
1570 return canceled;
1571 }
1572
1573 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
1574 {
1575 bool was_canceled = ubq->canceling;
1576 struct gendisk *disk;
1577
1578 if (was_canceled)
1579 return false;
1580
1581 spin_lock(&ub->lock);
1582 disk = ub->ub_disk;
1583 if (disk)
1584 get_device(disk_to_dev(disk));
1585 spin_unlock(&ub->lock);
1586
1587 /* Our disk is already dead */
1588 if (!disk)
1589 return false;
1590
1591 /*
1592 * Now we are serialized with ublk_queue_rq()
1593 *
1594 * Make sure that ubq->canceling is set when queue is frozen,
1595 * because ublk_queue_rq() has to rely on this flag to avoid touching
1596 * a completed uring_cmd
1597 */
1598 blk_mq_quiesce_queue(disk->queue);
1599 was_canceled = ublk_mark_queue_canceling(ubq);
1600 if (!was_canceled) {
1601 /* abort queue is for making forward progress */
1602 ublk_abort_queue(ub, ubq);
1603 }
1604 blk_mq_unquiesce_queue(disk->queue);
1605 put_device(disk_to_dev(disk));
1606
1607 return !was_canceled;
1608 }
1609
1610 static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
1611 unsigned int issue_flags)
1612 {
1613 bool done;
1614
1615 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1616 return;
1617
1618 spin_lock(&ubq->cancel_lock);
1619 done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
1620 if (!done)
1621 io->flags |= UBLK_IO_FLAG_CANCELED;
1622 spin_unlock(&ubq->cancel_lock);
1623
1624 if (!done)
1625 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
1626 }
1627
1628 /*
1629 * The ublk char device won't be closed when calling cancel fn, so both
1630 * ublk device and queue are guaranteed to be live
1631 */
1632 static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
1633 unsigned int issue_flags)
1634 {
1635 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1636 struct ublk_queue *ubq = pdu->ubq;
1637 struct task_struct *task;
1638 struct ublk_device *ub;
1639 bool need_schedule;
1640 struct ublk_io *io;
1641
1642 if (WARN_ON_ONCE(!ubq))
1643 return;
1644
1645 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))
1646 return;
1647
1648 task = io_uring_cmd_get_task(cmd);
1649 if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
1650 return;
1651
1652 ub = ubq->dev;
1653 need_schedule = ublk_abort_requests(ub, ubq);
1654
1655 io = &ubq->ios[pdu->tag];
1656 WARN_ON_ONCE(io->cmd != cmd);
1657 ublk_cancel_cmd(ubq, io, issue_flags);
1658
1659 if (need_schedule) {
1660 schedule_work(&ub->nosrv_work);
1661 }
1662 }
1663
1664 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1665 {
1666 return ubq->nr_io_ready == ubq->q_depth;
1667 }
1668
1669 static void ublk_cancel_queue(struct ublk_queue *ubq)
1670 {
1671 int i;
1672
1673 for (i = 0; i < ubq->q_depth; i++)
1674 ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
1675 }
1676
1677 /* Cancel all pending commands, must be called after del_gendisk() returns */
1678 static void ublk_cancel_dev(struct ublk_device *ub)
1679 {
1680 int i;
1681
1682 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1683 ublk_cancel_queue(ublk_get_queue(ub, i));
1684 }
1685
1686 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1687 {
1688 bool *idle = data;
1689
1690 if (blk_mq_request_started(rq)) {
1691 *idle = false;
1692 return false;
1693 }
1694 return true;
1695 }
1696
1697 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1698 {
1699 bool idle;
1700
1701 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1702 while (true) {
1703 idle = true;
1704 blk_mq_tagset_busy_iter(&ub->tag_set,
1705 ublk_check_inflight_rq, &idle);
1706 if (idle)
1707 break;
1708 msleep(UBLK_REQUEUE_DELAY_MS);
1709 }
1710 }
1711
1712 static void __ublk_quiesce_dev(struct ublk_device *ub)
1713 {
1714 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1715 __func__, ub->dev_info.dev_id,
1716 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1717 "LIVE" : "QUIESCED");
1718 blk_mq_quiesce_queue(ub->ub_disk->queue);
1719 ublk_wait_tagset_rqs_idle(ub);
1720 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1721 }
1722
1723 static void ublk_unquiesce_dev(struct ublk_device *ub)
1724 {
1725 int i;
1726
1727 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1728 __func__, ub->dev_info.dev_id,
1729 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1730 "LIVE" : "QUIESCED");
1731 /* quiesce_work has run. We let requeued rqs be aborted
1732 * before running fallback_wq. "force_abort" must be seen
1733 * after the request queue is unquiesced. Then del_gendisk()
1734 * can move on.
1735 */
1736 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1737 ublk_get_queue(ub, i)->force_abort = true;
1738
1739 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1740 /* We may have requeued some rqs in ublk_quiesce_queue() */
1741 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1742 }
1743
1744 static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
1745 {
1746 struct gendisk *disk;
1747
1748 /* Sync with ublk_abort_queue() by holding the lock */
1749 spin_lock(&ub->lock);
1750 disk = ub->ub_disk;
1751 ub->dev_info.state = UBLK_S_DEV_DEAD;
1752 ub->dev_info.ublksrv_pid = -1;
1753 ub->ub_disk = NULL;
1754 spin_unlock(&ub->lock);
1755
1756 return disk;
1757 }
1758
1759 static void ublk_stop_dev(struct ublk_device *ub)
1760 {
1761 struct gendisk *disk;
1762
1763 mutex_lock(&ub->mutex);
1764 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1765 goto unlock;
1766 if (ublk_nosrv_dev_should_queue_io(ub)) {
1767 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1768 __ublk_quiesce_dev(ub);
1769 ublk_unquiesce_dev(ub);
1770 }
1771 del_gendisk(ub->ub_disk);
1772 disk = ublk_detach_disk(ub);
1773 put_disk(disk);
1774 unlock:
1775 mutex_unlock(&ub->mutex);
1776 ublk_cancel_dev(ub);
1777 }
1778
1779 static void ublk_nosrv_work(struct work_struct *work)
1780 {
1781 struct ublk_device *ub =
1782 container_of(work, struct ublk_device, nosrv_work);
1783 int i;
1784
1785 if (ublk_nosrv_should_stop_dev(ub)) {
1786 ublk_stop_dev(ub);
1787 return;
1788 }
1789
1790 mutex_lock(&ub->mutex);
1791 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1792 goto unlock;
1793
1794 if (ublk_nosrv_dev_should_queue_io(ub)) {
1795 __ublk_quiesce_dev(ub);
1796 } else {
1797 blk_mq_quiesce_queue(ub->ub_disk->queue);
1798 ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
1799 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1800 ublk_get_queue(ub, i)->fail_io = true;
1801 }
1802 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1803 }
1804
1805 unlock:
1806 mutex_unlock(&ub->mutex);
1807 ublk_cancel_dev(ub);
1808 }
1809
1810 /* device can only be started after all IOs are ready */
1811 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1812 {
1813 mutex_lock(&ub->mutex);
1814 ubq->nr_io_ready++;
1815 if (ublk_queue_ready(ubq)) {
1816 ubq->ubq_daemon = current;
1817 get_task_struct(ubq->ubq_daemon);
1818 ub->nr_queues_ready++;
1819
1820 if (capable(CAP_SYS_ADMIN))
1821 ub->nr_privileged_daemon++;
1822 }
1823 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1824 complete_all(&ub->completion);
1825 mutex_unlock(&ub->mutex);
1826 }
1827
1828 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1829 int tag)
1830 {
1831 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1832 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1833
1834 ublk_queue_cmd(ubq, req);
1835 }
1836
1837 static inline int ublk_check_cmd_op(u32 cmd_op)
1838 {
1839 u32 ioc_type = _IOC_TYPE(cmd_op);
1840
1841 if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1842 return -EOPNOTSUPP;
1843
1844 if (ioc_type != 'u' && ioc_type != 0)
1845 return -EOPNOTSUPP;
1846
1847 return 0;
1848 }
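
/*
 * Illustrative sketch, not part of the driver: with UBLK_F_CMD_IOCTL_ENCODE
 * the opcode carried in sqe->cmd_op is a regular ioctl code in the 'u'
 * namespace, roughly (see uapi/linux/ublk_cmd.h for the authoritative form):
 *
 *   UBLK_U_IO_FETCH_REQ ~ _IOWR('u', UBLK_IO_FETCH_REQ, struct ublksrv_io_cmd)
 *
 * while the legacy opcodes are bare command numbers with _IOC_TYPE() == 0.
 * That is why ublk_check_cmd_op() always accepts type 'u' and accepts type 0
 * only when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
 */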
1849
1850 static inline void ublk_fill_io_cmd(struct ublk_io *io,
1851 struct io_uring_cmd *cmd, unsigned long buf_addr)
1852 {
1853 io->cmd = cmd;
1854 io->flags |= UBLK_IO_FLAG_ACTIVE;
1855 io->addr = buf_addr;
1856 }
1857
1858 static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
1859 unsigned int issue_flags,
1860 struct ublk_queue *ubq, unsigned int tag)
1861 {
1862 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1863
1864 /*
1865 * Safe to refer to @ubq since the ublk_queue won't be freed until its
1866 * commands are completed
1867 */
1868 pdu->ubq = ubq;
1869 pdu->tag = tag;
1870 io_uring_cmd_mark_cancelable(cmd, issue_flags);
1871 }
1872
1873 static void ublk_io_release(void *priv)
1874 {
1875 struct request *rq = priv;
1876 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1877
1878 ublk_put_req_ref(ubq, rq);
1879 }
1880
1881 static int ublk_register_io_buf(struct io_uring_cmd *cmd,
1882 struct ublk_queue *ubq, unsigned int tag,
1883 unsigned int index, unsigned int issue_flags)
1884 {
1885 struct ublk_device *ub = cmd->file->private_data;
1886 struct request *req;
1887 int ret;
1888
1889 req = __ublk_check_and_get_req(ub, ubq, tag, 0);
1890 if (!req)
1891 return -EINVAL;
1892
1893 ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
1894 issue_flags);
1895 if (ret) {
1896 ublk_put_req_ref(ubq, req);
1897 return ret;
1898 }
1899
1900 return 0;
1901 }
1902
1903 static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
1904 unsigned int index, unsigned int issue_flags)
1905 {
1906 return io_buffer_unregister_bvec(cmd, index, issue_flags);
1907 }
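
/*
 * Illustrative sketch of how a server might use the two commands above
 * (an assumption about userspace, not part of the driver): with
 * UBLK_F_SUPPORT_ZERO_COPY the pages of an inflight request can be exposed
 * as a fixed io_uring buffer, so backing-store I/O needs no bounce copy.
 * Roughly, on the /dev/ublkcN fd:
 *
 *   struct ublksrv_io_cmd *ic = (struct ublksrv_io_cmd *)sqe->cmd;
 *   sqe->opcode = IORING_OP_URING_CMD;
 *   sqe->fd = ublkc_fd;
 *   sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
 *   ic->q_id = q_id;
 *   ic->tag = tag;
 *   ic->addr = buf_index;
 *
 * ic->addr selects the slot in the ring's registered-buffer table; after the
 * cqe the server can issue fixed-buffer reads/writes against its backing
 * file with that index, then sends UBLK_U_IO_UNREGISTER_IO_BUF (same layout)
 * before committing the result.
 */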
1908
1909 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1910 unsigned int issue_flags,
1911 const struct ublksrv_io_cmd *ub_cmd)
1912 {
1913 struct ublk_device *ub = cmd->file->private_data;
1914 struct ublk_queue *ubq;
1915 struct ublk_io *io;
1916 u32 cmd_op = cmd->cmd_op;
1917 unsigned tag = ub_cmd->tag;
1918 int ret = -EINVAL;
1919 struct request *req;
1920
1921 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1922 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1923 ub_cmd->result);
1924
1925 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1926 goto out;
1927
1928 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1929 if (!ubq || ub_cmd->q_id != ubq->q_id)
1930 goto out;
1931
1932 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1933 goto out;
1934
1935 if (tag >= ubq->q_depth)
1936 goto out;
1937
1938 io = &ubq->ios[tag];
1939
1940 /* there is pending io cmd, something must be wrong */
1941 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1942 ret = -EBUSY;
1943 goto out;
1944 }
1945
1946 /*
1947 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1948 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
1949 */
1950 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1951 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1952 goto out;
1953
1954 ret = ublk_check_cmd_op(cmd_op);
1955 if (ret)
1956 goto out;
1957
1958 ret = -EINVAL;
1959 switch (_IOC_NR(cmd_op)) {
1960 case UBLK_IO_REGISTER_IO_BUF:
1961 return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
1962 case UBLK_IO_UNREGISTER_IO_BUF:
1963 return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
1964 case UBLK_IO_FETCH_REQ:
1965 /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1966 if (ublk_queue_ready(ubq)) {
1967 ret = -EBUSY;
1968 goto out;
1969 }
1970 /*
1971 * The io is being handled by server, so COMMIT_RQ is expected
1972 * instead of FETCH_REQ
1973 */
1974 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1975 goto out;
1976
1977 if (ublk_need_map_io(ubq)) {
1978 /*
1979 * FETCH_RQ has to provide IO buffer if NEED GET
1980 * DATA is not enabled
1981 */
1982 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1983 goto out;
1984 } else if (ub_cmd->addr) {
1985 /* User copy requires addr to be unset */
1986 ret = -EINVAL;
1987 goto out;
1988 }
1989
1990 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1991 ublk_mark_io_ready(ub, ubq);
1992 break;
1993 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1994 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1995
1996 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1997 goto out;
1998
1999 if (ublk_need_map_io(ubq)) {
2000 /*
2001 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
2002 * NEED GET DATA is not enabled or it is Read IO.
2003 */
2004 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
2005 req_op(req) == REQ_OP_READ))
2006 goto out;
2007 } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
2008 /*
2009 * User copy requires addr to be unset when command is
2010 * not zone append
2011 */
2012 ret = -EINVAL;
2013 goto out;
2014 }
2015
2016 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
2017 ublk_commit_completion(ub, ub_cmd);
2018 break;
2019 case UBLK_IO_NEED_GET_DATA:
2020 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
2021 goto out;
2022 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
2023 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
2024 break;
2025 default:
2026 goto out;
2027 }
2028 ublk_prep_cancel(cmd, issue_flags, ubq, tag);
2029 return -EIOCBQUEUED;
2030
2031 out:
2032 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
2033 __func__, cmd_op, tag, ret, io->flags);
2034 return ret;
2035 }
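
/*
 * Illustrative summary of the per-tag command flow handled above, as seen
 * from a typical server (an assumption about userspace, not part of the
 * driver):
 *
 *   1) before START_DEV, queue one UBLK_U_IO_FETCH_REQ uring_cmd per
 *      (q_id, tag); the driver holds each command until a blk-mq request
 *      arrives for that tag and only then posts its cqe;
 *   2) on a cqe, read the request descriptor from the mmapped io_cmd_buf
 *      and service the I/O against the backing store;
 *   3) send UBLK_U_IO_COMMIT_AND_FETCH_REQ with ->result set to the byte
 *      count (or a negative errno), which both completes the current
 *      request and re-arms the tag for the next one.
 *
 * UBLK_U_IO_NEED_GET_DATA is only used with UBLK_F_NEED_GET_DATA, when the
 * driver asks the server for a buffer before copying in write payload.
 */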
2036
2037 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
2038 struct ublk_queue *ubq, int tag, size_t offset)
2039 {
2040 struct request *req;
2041
2042 if (!ublk_need_req_ref(ubq))
2043 return NULL;
2044
2045 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
2046 if (!req)
2047 return NULL;
2048
2049 if (!ublk_get_req_ref(ubq, req))
2050 return NULL;
2051
2052 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
2053 goto fail_put;
2054
2055 if (!ublk_rq_has_data(req))
2056 goto fail_put;
2057
2058 if (offset > blk_rq_bytes(req))
2059 goto fail_put;
2060
2061 return req;
2062 fail_put:
2063 ublk_put_req_ref(ubq, req);
2064 return NULL;
2065 }
2066
2067 static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
2068 unsigned int issue_flags)
2069 {
2070 /*
2071 * Not necessary for async retry, but let's keep it simple and always
2072 * copy the values to avoid any potential reuse.
2073 */
2074 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
2075 const struct ublksrv_io_cmd ub_cmd = {
2076 .q_id = READ_ONCE(ub_src->q_id),
2077 .tag = READ_ONCE(ub_src->tag),
2078 .result = READ_ONCE(ub_src->result),
2079 .addr = READ_ONCE(ub_src->addr)
2080 };
2081
2082 WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
2083
2084 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
2085 }
2086
2087 static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
2088 unsigned int issue_flags)
2089 {
2090 int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
2091
2092 if (ret != -EIOCBQUEUED)
2093 io_uring_cmd_done(cmd, ret, 0, issue_flags);
2094 }
2095
2096 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
2097 {
2098 if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
2099 ublk_uring_cmd_cancel_fn(cmd, issue_flags);
2100 return 0;
2101 }
2102
2103 /* a well-implemented server won't run into the unlocked path */
2104 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
2105 io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
2106 return -EIOCBQUEUED;
2107 }
2108
2109 return ublk_ch_uring_cmd_local(cmd, issue_flags);
2110 }
2111
2112 static inline bool ublk_check_ubuf_dir(const struct request *req,
2113 int ubuf_dir)
2114 {
2115 /* copy ubuf to request pages */
2116 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
2117 ubuf_dir == ITER_SOURCE)
2118 return true;
2119
2120 /* copy request pages to ubuf */
2121 if ((req_op(req) == REQ_OP_WRITE ||
2122 req_op(req) == REQ_OP_ZONE_APPEND) &&
2123 ubuf_dir == ITER_DEST)
2124 return true;
2125
2126 return false;
2127 }
2128
2129 static struct request *ublk_check_and_get_req(struct kiocb *iocb,
2130 struct iov_iter *iter, size_t *off, int dir)
2131 {
2132 struct ublk_device *ub = iocb->ki_filp->private_data;
2133 struct ublk_queue *ubq;
2134 struct request *req;
2135 size_t buf_off;
2136 u16 tag, q_id;
2137
2138 if (!ub)
2139 return ERR_PTR(-EACCES);
2140
2141 if (!user_backed_iter(iter))
2142 return ERR_PTR(-EACCES);
2143
2144 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
2145 return ERR_PTR(-EACCES);
2146
2147 tag = ublk_pos_to_tag(iocb->ki_pos);
2148 q_id = ublk_pos_to_hwq(iocb->ki_pos);
2149 buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
2150
2151 if (q_id >= ub->dev_info.nr_hw_queues)
2152 return ERR_PTR(-EINVAL);
2153
2154 ubq = ublk_get_queue(ub, q_id);
2155 if (!ubq)
2156 return ERR_PTR(-EINVAL);
2157
2158 if (tag >= ubq->q_depth)
2159 return ERR_PTR(-EINVAL);
2160
2161 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
2162 if (!req)
2163 return ERR_PTR(-EINVAL);
2164
2165 if (!req->mq_hctx || !req->mq_hctx->driver_data)
2166 goto fail;
2167
2168 if (!ublk_check_ubuf_dir(req, dir))
2169 goto fail;
2170
2171 *off = buf_off;
2172 return req;
2173 fail:
2174 ublk_put_req_ref(ubq, req);
2175 return ERR_PTR(-EACCES);
2176 }
2177
2178 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
2179 {
2180 struct ublk_queue *ubq;
2181 struct request *req;
2182 size_t buf_off;
2183 size_t ret;
2184
2185 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
2186 if (IS_ERR(req))
2187 return PTR_ERR(req);
2188
2189 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
2190 ubq = req->mq_hctx->driver_data;
2191 ublk_put_req_ref(ubq, req);
2192
2193 return ret;
2194 }
2195
2196 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
2197 {
2198 struct ublk_queue *ubq;
2199 struct request *req;
2200 size_t buf_off;
2201 size_t ret;
2202
2203 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
2204 if (IS_ERR(req))
2205 return PTR_ERR(req);
2206
2207 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
2208 ubq = req->mq_hctx->driver_data;
2209 ublk_put_req_ref(ubq, req);
2210
2211 return ret;
2212 }
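
/*
 * Illustrative sketch of the user-copy path served by the two helpers above
 * (an assumption about userspace, not part of the driver): with
 * UBLK_F_USER_COPY the server moves payload via pread()/pwrite() on
 * /dev/ublkcN, and the file position encodes which request is addressed;
 * the driver decodes it with ublk_pos_to_hwq()/ublk_pos_to_tag()/
 * ublk_pos_to_buf_off() relative to UBLKSRV_IO_BUF_OFFSET. Roughly, for a
 * READ request the server pushes data into the request with:
 *
 *   off_t pos = ublk_user_copy_pos(q_id, tag, 0);
 *   ssize_t n = pwrite(ublkc_fd, data, len, pos);
 *
 * where ublk_user_copy_pos() is a hypothetical helper that builds the
 * encoded position; for a WRITE request the payload is fetched with pread()
 * at the same position before being written to the backing store.
 */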
2213
2214 static const struct file_operations ublk_ch_fops = {
2215 .owner = THIS_MODULE,
2216 .open = ublk_ch_open,
2217 .release = ublk_ch_release,
2218 .read_iter = ublk_ch_read_iter,
2219 .write_iter = ublk_ch_write_iter,
2220 .uring_cmd = ublk_ch_uring_cmd,
2221 .mmap = ublk_ch_mmap,
2222 };
2223
2224 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
2225 {
2226 int size = ublk_queue_cmd_buf_size(ub, q_id);
2227 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2228
2229 if (ubq->ubq_daemon)
2230 put_task_struct(ubq->ubq_daemon);
2231 if (ubq->io_cmd_buf)
2232 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
2233 }
2234
2235 static int ublk_init_queue(struct ublk_device *ub, int q_id)
2236 {
2237 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2238 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
2239 void *ptr;
2240 int size;
2241
2242 spin_lock_init(&ubq->cancel_lock);
2243 ubq->flags = ub->dev_info.flags;
2244 ubq->q_id = q_id;
2245 ubq->q_depth = ub->dev_info.queue_depth;
2246 size = ublk_queue_cmd_buf_size(ub, q_id);
2247
2248 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
2249 if (!ptr)
2250 return -ENOMEM;
2251
2252 ubq->io_cmd_buf = ptr;
2253 ubq->dev = ub;
2254 return 0;
2255 }
2256
2257 static void ublk_deinit_queues(struct ublk_device *ub)
2258 {
2259 int nr_queues = ub->dev_info.nr_hw_queues;
2260 int i;
2261
2262 if (!ub->__queues)
2263 return;
2264
2265 for (i = 0; i < nr_queues; i++)
2266 ublk_deinit_queue(ub, i);
2267 kfree(ub->__queues);
2268 }
2269
2270 static int ublk_init_queues(struct ublk_device *ub)
2271 {
2272 int nr_queues = ub->dev_info.nr_hw_queues;
2273 int depth = ub->dev_info.queue_depth;
2274 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
2275 int i, ret = -ENOMEM;
2276
2277 ub->queue_size = ubq_size;
2278 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2279 if (!ub->__queues)
2280 return ret;
2281
2282 for (i = 0; i < nr_queues; i++) {
2283 if (ublk_init_queue(ub, i))
2284 goto fail;
2285 }
2286
2287 init_completion(&ub->completion);
2288 return 0;
2289
2290 fail:
2291 ublk_deinit_queues(ub);
2292 return ret;
2293 }
2294
2295 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2296 {
2297 int i = idx;
2298 int err;
2299
2300 spin_lock(&ublk_idr_lock);
2301 /* allocate id, if @idx >= 0, we're requesting that specific id */
2302 if (i >= 0) {
2303 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2304 if (err == -ENOSPC)
2305 err = -EEXIST;
2306 } else {
2307 err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS,
2308 GFP_NOWAIT);
2309 }
2310 spin_unlock(&ublk_idr_lock);
2311
2312 if (err >= 0)
2313 ub->ub_number = err;
2314
2315 return err;
2316 }
2317
2318 static void ublk_free_dev_number(struct ublk_device *ub)
2319 {
2320 spin_lock(&ublk_idr_lock);
2321 idr_remove(&ublk_index_idr, ub->ub_number);
2322 wake_up_all(&ublk_idr_wq);
2323 spin_unlock(&ublk_idr_lock);
2324 }
2325
2326 static void ublk_cdev_rel(struct device *dev)
2327 {
2328 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2329
2330 blk_mq_free_tag_set(&ub->tag_set);
2331 ublk_deinit_queues(ub);
2332 ublk_free_dev_number(ub);
2333 mutex_destroy(&ub->mutex);
2334 kfree(ub);
2335 }
2336
2337 static int ublk_add_chdev(struct ublk_device *ub)
2338 {
2339 struct device *dev = &ub->cdev_dev;
2340 int minor = ub->ub_number;
2341 int ret;
2342
2343 dev->parent = ublk_misc.this_device;
2344 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
2345 dev->class = &ublk_chr_class;
2346 dev->release = ublk_cdev_rel;
2347 device_initialize(dev);
2348
2349 ret = dev_set_name(dev, "ublkc%d", minor);
2350 if (ret)
2351 goto fail;
2352
2353 cdev_init(&ub->cdev, &ublk_ch_fops);
2354 ret = cdev_device_add(&ub->cdev, dev);
2355 if (ret)
2356 goto fail;
2357
2358 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
2359 unprivileged_ublks_added++;
2360 return 0;
2361 fail:
2362 put_device(dev);
2363 return ret;
2364 }
2365
2366 /* align max io buffer size with PAGE_SIZE */
2367 static void ublk_align_max_io_size(struct ublk_device *ub)
2368 {
2369 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2370
2371 ub->dev_info.max_io_buf_bytes =
2372 round_down(max_io_bytes, PAGE_SIZE);
2373 }
2374
2375 static int ublk_add_tag_set(struct ublk_device *ub)
2376 {
2377 ub->tag_set.ops = &ublk_mq_ops;
2378 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2379 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2380 ub->tag_set.numa_node = NUMA_NO_NODE;
2381 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
2382 ub->tag_set.driver_data = ub;
2383 return blk_mq_alloc_tag_set(&ub->tag_set);
2384 }
2385
2386 static void ublk_remove(struct ublk_device *ub)
2387 {
2388 bool unprivileged;
2389
2390 ublk_stop_dev(ub);
2391 cancel_work_sync(&ub->nosrv_work);
2392 cdev_device_del(&ub->cdev, &ub->cdev_dev);
2393 unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2394 ublk_put_device(ub);
2395
2396 if (unprivileged)
2397 unprivileged_ublks_added--;
2398 }
2399
2400 static struct ublk_device *ublk_get_device_from_id(int idx)
2401 {
2402 struct ublk_device *ub = NULL;
2403
2404 if (idx < 0)
2405 return NULL;
2406
2407 spin_lock(&ublk_idr_lock);
2408 ub = idr_find(&ublk_index_idr, idx);
2409 if (ub)
2410 ub = ublk_get_device(ub);
2411 spin_unlock(&ublk_idr_lock);
2412
2413 return ub;
2414 }
2415
2416 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
2417 {
2418 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2419 const struct ublk_param_basic *p = &ub->params.basic;
2420 int ublksrv_pid = (int)header->data[0];
2421 struct queue_limits lim = {
2422 .logical_block_size = 1 << p->logical_bs_shift,
2423 .physical_block_size = 1 << p->physical_bs_shift,
2424 .io_min = 1 << p->io_min_shift,
2425 .io_opt = 1 << p->io_opt_shift,
2426 .max_hw_sectors = p->max_sectors,
2427 .chunk_sectors = p->chunk_sectors,
2428 .virt_boundary_mask = p->virt_boundary_mask,
2429 .max_segments = USHRT_MAX,
2430 .max_segment_size = UINT_MAX,
2431 .dma_alignment = 3,
2432 };
2433 struct gendisk *disk;
2434 int ret = -EINVAL;
2435
2436 if (ublksrv_pid <= 0)
2437 return -EINVAL;
2438 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
2439 return -EINVAL;
2440
2441 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
2442 const struct ublk_param_discard *pd = &ub->params.discard;
2443
2444 lim.discard_alignment = pd->discard_alignment;
2445 lim.discard_granularity = pd->discard_granularity;
2446 lim.max_hw_discard_sectors = pd->max_discard_sectors;
2447 lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
2448 lim.max_discard_segments = pd->max_discard_segments;
2449 }
2450
2451 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
2452 const struct ublk_param_zoned *p = &ub->params.zoned;
2453
2454 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
2455 return -EOPNOTSUPP;
2456
2457 lim.features |= BLK_FEAT_ZONED;
2458 lim.max_active_zones = p->max_active_zones;
2459 lim.max_open_zones = p->max_open_zones;
2460 lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
2461 }
2462
2463 if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
2464 lim.features |= BLK_FEAT_WRITE_CACHE;
2465 if (ub->params.basic.attrs & UBLK_ATTR_FUA)
2466 lim.features |= BLK_FEAT_FUA;
2467 }
2468
2469 if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
2470 lim.features |= BLK_FEAT_ROTATIONAL;
2471
2472 if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
2473 lim.dma_alignment = ub->params.dma.alignment;
2474
2475 if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
2476 lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
2477 lim.max_segment_size = ub->params.seg.max_segment_size;
2478 lim.max_segments = ub->params.seg.max_segments;
2479 }
2480
2481 if (wait_for_completion_interruptible(&ub->completion) != 0)
2482 return -EINTR;
2483
2484 mutex_lock(&ub->mutex);
2485 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2486 test_bit(UB_STATE_USED, &ub->state)) {
2487 ret = -EEXIST;
2488 goto out_unlock;
2489 }
2490
2491 disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
2492 if (IS_ERR(disk)) {
2493 ret = PTR_ERR(disk);
2494 goto out_unlock;
2495 }
2496 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2497 disk->fops = &ub_fops;
2498 disk->private_data = ub;
2499
2500 ub->dev_info.ublksrv_pid = ublksrv_pid;
2501 ub->ub_disk = disk;
2502
2503 ublk_apply_params(ub);
2504
2505 /* don't probe partitions if any ubq daemon is untrusted */
2506 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2507 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2508
2509 ublk_get_device(ub);
2510 ub->dev_info.state = UBLK_S_DEV_LIVE;
2511
2512 if (ublk_dev_is_zoned(ub)) {
2513 ret = ublk_revalidate_disk_zones(ub);
2514 if (ret)
2515 goto out_put_cdev;
2516 }
2517
2518 ret = add_disk(disk);
2519 if (ret)
2520 goto out_put_cdev;
2521
2522 set_bit(UB_STATE_USED, &ub->state);
2523
2524 out_put_cdev:
2525 if (ret) {
2526 ublk_detach_disk(ub);
2527 ublk_put_device(ub);
2528 }
2529 if (ret)
2530 put_disk(disk);
2531 out_unlock:
2532 mutex_unlock(&ub->mutex);
2533 return ret;
2534 }
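
/*
 * Illustrative sketch of the bring-up sequence that ends in the function
 * above (an assumption about typical usage, not part of the driver):
 * ADD_DEV on /dev/ublk-control, SET_PARAMS with at least
 * UBLK_PARAM_TYPE_BASIC, open /dev/ublkcN and queue FETCH_REQ for every tag
 * of every queue, then:
 *
 *   ctrl_cmd(ring, ctrl_fd, UBLK_U_CMD_START_DEV, dev_id, getpid(), 0, 0);
 *
 * where ctrl_cmd(ring, fd, op, dev_id, data0, addr, len) is a hypothetical
 * helper that fills an SQE128 ublksrv_ctrl_cmd and waits for its cqe.
 * ublk_ctrl_start_dev() then blocks until ublk_mark_io_ready() has seen all
 * queues ready, translates the stored parameters into queue_limits, and
 * add_disk() finally exposes /dev/ublkbN.
 */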
2535
2536 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2537 struct io_uring_cmd *cmd)
2538 {
2539 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2540 void __user *argp = (void __user *)(unsigned long)header->addr;
2541 cpumask_var_t cpumask;
2542 unsigned long queue;
2543 unsigned int retlen;
2544 unsigned int i;
2545 int ret;
2546
2547 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
2548 return -EINVAL;
2549 if (header->len & (sizeof(unsigned long)-1))
2550 return -EINVAL;
2551 if (!header->addr)
2552 return -EINVAL;
2553
2554 queue = header->data[0];
2555 if (queue >= ub->dev_info.nr_hw_queues)
2556 return -EINVAL;
2557
2558 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
2559 return -ENOMEM;
2560
2561 for_each_possible_cpu(i) {
2562 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2563 cpumask_set_cpu(i, cpumask);
2564 }
2565
2566 ret = -EFAULT;
2567 retlen = min_t(unsigned short, header->len, cpumask_size());
2568 if (copy_to_user(argp, cpumask, retlen))
2569 goto out_free_cpumask;
2570 if (retlen != header->len &&
2571 clear_user(argp + retlen, header->len - retlen))
2572 goto out_free_cpumask;
2573
2574 ret = 0;
2575 out_free_cpumask:
2576 free_cpumask_var(cpumask);
2577 return ret;
2578 }
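
/*
 * Illustrative sketch (an assumption about userspace, not part of the
 * driver): a server queries the CPUs mapped to a hardware queue so it can
 * pin that queue's daemon thread. header->data[0] carries the queue id and
 * header->addr/len point at a cpumask-sized bitmap which is copied back:
 *
 *   unsigned long mask[CPU_SETSIZE / (8 * sizeof(long))] = { 0 };
 *   ctrl_cmd(ring, ctrl_fd, UBLK_U_CMD_GET_QUEUE_AFFINITY, dev_id,
 *            q_id, (__u64)(uintptr_t)mask, sizeof(mask));
 *
 * using the same hypothetical ctrl_cmd() helper as in the START_DEV sketch.
 */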
2579
2580 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
2581 {
2582 pr_devel("%s: dev id %d flags %llx\n", __func__,
2583 info->dev_id, info->flags);
2584 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2585 info->nr_hw_queues, info->queue_depth);
2586 }
2587
2588 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
2589 {
2590 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2591 void __user *argp = (void __user *)(unsigned long)header->addr;
2592 struct ublksrv_ctrl_dev_info info;
2593 struct ublk_device *ub;
2594 int ret = -EINVAL;
2595
2596 if (header->len < sizeof(info) || !header->addr)
2597 return -EINVAL;
2598 if (header->queue_id != (u16)-1) {
2599 pr_warn("%s: queue_id is wrong %x\n",
2600 __func__, header->queue_id);
2601 return -EINVAL;
2602 }
2603
2604 if (copy_from_user(&info, argp, sizeof(info)))
2605 return -EFAULT;
2606
2607 if (capable(CAP_SYS_ADMIN))
2608 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
2609 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
2610 return -EPERM;
2611
2612 /* forbid nonsense combinations of recovery flags */
2613 switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
2614 case 0:
2615 case UBLK_F_USER_RECOVERY:
2616 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
2617 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
2618 break;
2619 default:
2620 pr_warn("%s: invalid recovery flags %llx\n", __func__,
2621 info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
2622 return -EINVAL;
2623 }
2624
2625 /*
2626 * An unprivileged device can't be trusted, but RECOVERY and
2627 * RECOVERY_REISSUE may still hang error handling, so recovery
2628 * features can't be supported for unprivileged ublk now
2629 *
2630 * TODO: provide forward progress for RECOVERY handler, so that
2631 * unprivileged device can benefit from it
2632 */
2633 if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
2634 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
2635 UBLK_F_USER_RECOVERY);
2636
2637 /*
2638 * For USER_COPY, we depend on userspace to fill the request
2639 * buffer by pwrite() to the ublk char device, which can't be
2640 * used for an unprivileged device
2641 */
2642 if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
2643 return -EINVAL;
2644 }
2645
2646 /* the created device is always owned by current user */
2647 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
2648
2649 if (header->dev_id != info.dev_id) {
2650 pr_warn("%s: dev id not match %u %u\n",
2651 __func__, header->dev_id, info.dev_id);
2652 return -EINVAL;
2653 }
2654
2655 if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
2656 pr_warn("%s: dev id is too large. Max supported is %d\n",
2657 __func__, UBLK_MAX_UBLKS - 1);
2658 return -EINVAL;
2659 }
2660
2661 ublk_dump_dev_info(&info);
2662
2663 ret = mutex_lock_killable(&ublk_ctl_mutex);
2664 if (ret)
2665 return ret;
2666
2667 ret = -EACCES;
2668 if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
2669 unprivileged_ublks_added >= unprivileged_ublks_max)
2670 goto out_unlock;
2671
2672 ret = -ENOMEM;
2673 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2674 if (!ub)
2675 goto out_unlock;
2676 mutex_init(&ub->mutex);
2677 spin_lock_init(&ub->lock);
2678 INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
2679
2680 ret = ublk_alloc_dev_number(ub, header->dev_id);
2681 if (ret < 0)
2682 goto out_free_ub;
2683
2684 memcpy(&ub->dev_info, &info, sizeof(info));
2685
2686 /* update device id */
2687 ub->dev_info.dev_id = ub->ub_number;
2688
2689 /*
2690 * The 64-bit flags are copied back to userspace as the feature
2691 * negotiation result, so clear any flags which the driver
2692 * doesn't support yet; then userspace can get the correct flags
2693 * (features) to handle.
2694 */
2695 ub->dev_info.flags &= UBLK_F_ALL;
2696
2697 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2698 UBLK_F_URING_CMD_COMP_IN_TASK;
2699
2700 /* GET_DATA isn't needed any more with USER_COPY */
2701 if (ublk_dev_is_user_copy(ub))
2702 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2703
2704 /* Zoned storage support requires user copy feature */
2705 if (ublk_dev_is_zoned(ub) &&
2706 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
2707 ret = -EINVAL;
2708 goto out_free_dev_number;
2709 }
2710
2711 ub->dev_info.nr_hw_queues = min_t(unsigned int,
2712 ub->dev_info.nr_hw_queues, nr_cpu_ids);
2713 ublk_align_max_io_size(ub);
2714
2715 ret = ublk_init_queues(ub);
2716 if (ret)
2717 goto out_free_dev_number;
2718
2719 ret = ublk_add_tag_set(ub);
2720 if (ret)
2721 goto out_deinit_queues;
2722
2723 ret = -EFAULT;
2724 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2725 goto out_free_tag_set;
2726
2727 /*
2728 * Add the char dev so that ublksrv daemon can be setup.
2729 * ublk_add_chdev() will cleanup everything if it fails.
2730 */
2731 ret = ublk_add_chdev(ub);
2732 goto out_unlock;
2733
2734 out_free_tag_set:
2735 blk_mq_free_tag_set(&ub->tag_set);
2736 out_deinit_queues:
2737 ublk_deinit_queues(ub);
2738 out_free_dev_number:
2739 ublk_free_dev_number(ub);
2740 out_free_ub:
2741 mutex_destroy(&ub->mutex);
2742 kfree(ub);
2743 out_unlock:
2744 mutex_unlock(&ublk_ctl_mutex);
2745 return ret;
2746 }
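
/*
 * Illustrative sketch (an assumption about userspace, not part of the
 * driver): because ADD_DEV copies the adjusted ublksrv_ctrl_dev_info back
 * to header->addr, the server should re-read it and trust only the returned
 * flags as the negotiated feature set:
 *
 *   struct ublksrv_ctrl_dev_info info = {
 *           .nr_hw_queues = 1, .queue_depth = 128,
 *           .max_io_buf_bytes = 512 << 10,
 *           .dev_id = (__u32)-1, .flags = UBLK_F_USER_COPY,
 *   };
 *   ctrl_cmd(ring, ctrl_fd, UBLK_U_CMD_ADD_DEV, -1, 0,
 *            (__u64)(uintptr_t)&info, sizeof(info));
 *
 * A dev_id of -1 (U32_MAX) asks the driver to pick a free device number,
 * reported back in info.dev_id; if UBLK_F_USER_COPY is missing from
 * info.flags afterwards, the server must fall back or give up. ctrl_cmd()
 * is the hypothetical helper from the START_DEV sketch.
 */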
2747
2748 static inline bool ublk_idr_freed(int id)
2749 {
2750 void *ptr;
2751
2752 spin_lock(&ublk_idr_lock);
2753 ptr = idr_find(&ublk_index_idr, id);
2754 spin_unlock(&ublk_idr_lock);
2755
2756 return ptr == NULL;
2757 }
2758
2759 static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
2760 {
2761 struct ublk_device *ub = *p_ub;
2762 int idx = ub->ub_number;
2763 int ret;
2764
2765 ret = mutex_lock_killable(&ublk_ctl_mutex);
2766 if (ret)
2767 return ret;
2768
2769 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2770 ublk_remove(ub);
2771 set_bit(UB_STATE_DELETED, &ub->state);
2772 }
2773
2774 /* Mark the reference as consumed */
2775 *p_ub = NULL;
2776 ublk_put_device(ub);
2777 mutex_unlock(&ublk_ctl_mutex);
2778
2779 /*
2780 * Wait until the idr is removed, then it can be reused after
2781 * DEL_DEV command is returned.
2782 *
2783 * If we return because of a user interrupt, a future delete command
2784 * may come:
2785 *
2786 * - the device number isn't freed, this device won't or needn't
2787 * be deleted again, since UB_STATE_DELETED is set, and device
2788 * will be released after the last reference is dropped
2789 *
2790 * - the device number is freed already, we will not find this
2791 * device via ublk_get_device_from_id()
2792 */
2793 if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
2794 return -EINTR;
2795 return 0;
2796 }
2797
2798 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
2799 {
2800 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2801
2802 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
2803 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
2804 header->data[0], header->addr, header->len);
2805 }
2806
2807 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2808 {
2809 ublk_stop_dev(ub);
2810 cancel_work_sync(&ub->nosrv_work);
2811 return 0;
2812 }
2813
2814 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2815 struct io_uring_cmd *cmd)
2816 {
2817 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2818 void __user *argp = (void __user *)(unsigned long)header->addr;
2819
2820 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
2821 return -EINVAL;
2822
2823 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2824 return -EFAULT;
2825
2826 return 0;
2827 }
2828
2829 /* TYPE_DEVT is readonly, so fill it up before returning to userspace */
2830 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2831 {
2832 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2833 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2834
2835 if (ub->ub_disk) {
2836 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2837 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2838 } else {
2839 ub->params.devt.disk_major = 0;
2840 ub->params.devt.disk_minor = 0;
2841 }
2842 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
2843 }
2844
2845 static int ublk_ctrl_get_params(struct ublk_device *ub,
2846 struct io_uring_cmd *cmd)
2847 {
2848 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2849 void __user *argp = (void __user *)(unsigned long)header->addr;
2850 struct ublk_params_header ph;
2851 int ret;
2852
2853 if (header->len <= sizeof(ph) || !header->addr)
2854 return -EINVAL;
2855
2856 if (copy_from_user(&ph, argp, sizeof(ph)))
2857 return -EFAULT;
2858
2859 if (ph.len > header->len || !ph.len)
2860 return -EINVAL;
2861
2862 if (ph.len > sizeof(struct ublk_params))
2863 ph.len = sizeof(struct ublk_params);
2864
2865 mutex_lock(&ub->mutex);
2866 ublk_ctrl_fill_params_devt(ub);
2867 if (copy_to_user(argp, &ub->params, ph.len))
2868 ret = -EFAULT;
2869 else
2870 ret = 0;
2871 mutex_unlock(&ub->mutex);
2872
2873 return ret;
2874 }
2875
2876 static int ublk_ctrl_set_params(struct ublk_device *ub,
2877 struct io_uring_cmd *cmd)
2878 {
2879 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2880 void __user *argp = (void __user *)(unsigned long)header->addr;
2881 struct ublk_params_header ph;
2882 int ret = -EFAULT;
2883
2884 if (header->len <= sizeof(ph) || !header->addr)
2885 return -EINVAL;
2886
2887 if (copy_from_user(&ph, argp, sizeof(ph)))
2888 return -EFAULT;
2889
2890 if (ph.len > header->len || !ph.len || !ph.types)
2891 return -EINVAL;
2892
2893 if (ph.len > sizeof(struct ublk_params))
2894 ph.len = sizeof(struct ublk_params);
2895
2896 mutex_lock(&ub->mutex);
2897 if (test_bit(UB_STATE_USED, &ub->state)) {
2898 /*
2899 * Parameters can only be changed when device hasn't
2900 * been started yet
2901 */
2902 ret = -EACCES;
2903 } else if (copy_from_user(&ub->params, argp, ph.len)) {
2904 ret = -EFAULT;
2905 } else {
2906 /* clear all we don't support yet */
2907 ub->params.types &= UBLK_PARAM_TYPE_ALL;
2908 ret = ublk_validate_params(ub);
2909 if (ret)
2910 ub->params.types = 0;
2911 }
2912 mutex_unlock(&ub->mutex);
2913
2914 return ret;
2915 }
2916
2917 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2918 {
2919 int i;
2920
2921 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2922
2923 /* All old ioucmds have to be completed */
2924 ubq->nr_io_ready = 0;
2925 /* old daemon is PF_EXITING, put it now */
2926 put_task_struct(ubq->ubq_daemon);
2927 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2928 ubq->ubq_daemon = NULL;
2929 ubq->timeout = false;
2930 ubq->canceling = false;
2931
2932 for (i = 0; i < ubq->q_depth; i++) {
2933 struct ublk_io *io = &ubq->ios[i];
2934
2935 /* forget everything now and be ready for new FETCH_REQ */
2936 io->flags = 0;
2937 io->cmd = NULL;
2938 io->addr = 0;
2939 }
2940 }
2941
2942 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2943 struct io_uring_cmd *cmd)
2944 {
2945 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2946 int ret = -EINVAL;
2947 int i;
2948
2949 mutex_lock(&ub->mutex);
2950 if (ublk_nosrv_should_stop_dev(ub))
2951 goto out_unlock;
2952 if (!ub->nr_queues_ready)
2953 goto out_unlock;
2954 /*
2955 * START_RECOVERY is only allowed after:
2956 *
2957 * (1) UB_STATE_OPEN is not set, which means the dying process is exited
2958 * and related io_uring ctx is freed so file struct of /dev/ublkcX is
2959 * released.
2960 *
2961 * and one of the following holds
2962 *
2963 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
2964 * (a) has quiesced the request queue
2965 * (b) has requeued every inflight rq whose io_flags is ACTIVE
2966 * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
2967 * (d) has completed/canceled all ioucmds owned by the dying process
2968 *
2969 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
2970 * quiesced, but all I/O is being immediately errored
2971 */
2972 if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
2973 ret = -EBUSY;
2974 goto out_unlock;
2975 }
2976 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2977 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2978 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2979 /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
2980 ub->mm = NULL;
2981 ub->nr_queues_ready = 0;
2982 ub->nr_privileged_daemon = 0;
2983 init_completion(&ub->completion);
2984 ret = 0;
2985 out_unlock:
2986 mutex_unlock(&ub->mutex);
2987 return ret;
2988 }
2989
2990 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2991 struct io_uring_cmd *cmd)
2992 {
2993 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2994 int ublksrv_pid = (int)header->data[0];
2995 int ret = -EINVAL;
2996 int i;
2997
2998 pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
2999 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
3000 /* wait until the new ubq_daemon has sent all FETCH_REQ commands */
3001 if (wait_for_completion_interruptible(&ub->completion))
3002 return -EINTR;
3003
3004 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
3005 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
3006
3007 mutex_lock(&ub->mutex);
3008 if (ublk_nosrv_should_stop_dev(ub))
3009 goto out_unlock;
3010
3011 if (!ublk_dev_in_recoverable_state(ub)) {
3012 ret = -EBUSY;
3013 goto out_unlock;
3014 }
3015 ub->dev_info.ublksrv_pid = ublksrv_pid;
3016 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
3017 __func__, ublksrv_pid, header->dev_id);
3018
3019 if (ublk_nosrv_dev_should_queue_io(ub)) {
3020 ub->dev_info.state = UBLK_S_DEV_LIVE;
3021 blk_mq_unquiesce_queue(ub->ub_disk->queue);
3022 pr_devel("%s: queue unquiesced, dev id %d.\n",
3023 __func__, header->dev_id);
3024 blk_mq_kick_requeue_list(ub->ub_disk->queue);
3025 } else {
3026 blk_mq_quiesce_queue(ub->ub_disk->queue);
3027 ub->dev_info.state = UBLK_S_DEV_LIVE;
3028 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
3029 ublk_get_queue(ub, i)->fail_io = false;
3030 }
3031 blk_mq_unquiesce_queue(ub->ub_disk->queue);
3032 }
3033
3034 ret = 0;
3035 out_unlock:
3036 mutex_unlock(&ub->mutex);
3037 return ret;
3038 }
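
/*
 * Illustrative summary of the recovery handshake implemented by the two
 * functions above (an assumption about typical usage, not part of the
 * driver): after the old daemon dies with UBLK_F_USER_RECOVERY set, a new
 * server process
 *
 *   1) sends UBLK_U_CMD_START_USER_RECOVERY, which re-initializes every
 *      queue via ublk_queue_reinit() once the old daemon's char-dev file
 *      has been released;
 *   2) opens /dev/ublkcN, mmaps the descriptor buffers again and queues
 *      FETCH_REQ for every tag, completing ub->completion;
 *   3) sends UBLK_U_CMD_END_USER_RECOVERY with data[0] = new daemon pid,
 *      after which the queue is unquiesced (or fail_io is cleared) and any
 *      requeued requests are redispatched to the new daemon.
 */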
3039
3040 static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
3041 {
3042 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
3043 void __user *argp = (void __user *)(unsigned long)header->addr;
3044 u64 features = UBLK_F_ALL;
3045
3046 if (header->len != UBLK_FEATURES_LEN || !header->addr)
3047 return -EINVAL;
3048
3049 if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
3050 return -EFAULT;
3051
3052 return 0;
3053 }
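
/*
 * Illustrative sketch (an assumption about userspace, not part of the
 * driver): GET_FEATURES is typically the first control command a server
 * sends, and it only needs an 8-byte buffer (UBLK_FEATURES_LEN):
 *
 *   __u64 features = 0;
 *   ctrl_cmd(ring, ctrl_fd, UBLK_U_CMD_GET_FEATURES, 0, 0,
 *            (__u64)(uintptr_t)&features, sizeof(features));
 *
 * The returned bitmask tells the server, for example, whether the UBLK_U_*
 * ioctl-encoded opcodes (UBLK_F_CMD_IOCTL_ENCODE) are available. ctrl_cmd()
 * is the hypothetical helper from the START_DEV sketch.
 */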
3054
3055 /*
3056 * All control commands are sent via /dev/ublk-control, so we have to check
3057 * the destination device's permission
3058 */
3059 static int ublk_char_dev_permission(struct ublk_device *ub,
3060 const char *dev_path, int mask)
3061 {
3062 int err;
3063 struct path path;
3064 struct kstat stat;
3065
3066 err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
3067 if (err)
3068 return err;
3069
3070 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
3071 if (err)
3072 goto exit;
3073
3074 err = -EPERM;
3075 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
3076 goto exit;
3077
3078 err = inode_permission(&nop_mnt_idmap,
3079 d_backing_inode(path.dentry), mask);
3080 exit:
3081 path_put(&path);
3082 return err;
3083 }
3084
3085 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
3086 struct io_uring_cmd *cmd)
3087 {
3088 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
3089 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
3090 void __user *argp = (void __user *)(unsigned long)header->addr;
3091 char *dev_path = NULL;
3092 int ret = 0;
3093 int mask;
3094
3095 if (!unprivileged) {
3096 if (!capable(CAP_SYS_ADMIN))
3097 return -EPERM;
3098 /*
3099 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
3100 * char_dev_path in its payload too, since userspace may not
3101 * know whether the specified device was created in unprivileged
3102 * mode.
3103 */
3104 if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
3105 return 0;
3106 }
3107
3108 /*
3109 * User has to provide the char device path for unprivileged ublk
3110 *
3111 * header->addr always points to the dev path buffer, and
3112 * header->dev_path_len records length of dev path buffer.
3113 */
3114 if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
3115 return -EINVAL;
3116
3117 if (header->len < header->dev_path_len)
3118 return -EINVAL;
3119
3120 dev_path = memdup_user_nul(argp, header->dev_path_len);
3121 if (IS_ERR(dev_path))
3122 return PTR_ERR(dev_path);
3123
3124 ret = -EINVAL;
3125 switch (_IOC_NR(cmd->cmd_op)) {
3126 case UBLK_CMD_GET_DEV_INFO:
3127 case UBLK_CMD_GET_DEV_INFO2:
3128 case UBLK_CMD_GET_QUEUE_AFFINITY:
3129 case UBLK_CMD_GET_PARAMS:
3130 case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
3131 mask = MAY_READ;
3132 break;
3133 case UBLK_CMD_START_DEV:
3134 case UBLK_CMD_STOP_DEV:
3135 case UBLK_CMD_ADD_DEV:
3136 case UBLK_CMD_DEL_DEV:
3137 case UBLK_CMD_SET_PARAMS:
3138 case UBLK_CMD_START_USER_RECOVERY:
3139 case UBLK_CMD_END_USER_RECOVERY:
3140 mask = MAY_READ | MAY_WRITE;
3141 break;
3142 default:
3143 goto exit;
3144 }
3145
3146 ret = ublk_char_dev_permission(ub, dev_path, mask);
3147 if (!ret) {
3148 header->len -= header->dev_path_len;
3149 header->addr += header->dev_path_len;
3150 }
3151 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
3152 __func__, ub->ub_number, cmd->cmd_op,
3153 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
3154 dev_path, ret);
3155 exit:
3156 kfree(dev_path);
3157 return ret;
3158 }
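
/*
 * Illustrative sketch of the unprivileged payload layout checked above (the
 * layout follows from the addr/len adjustment; the snippet itself is an
 * assumption about userspace, not part of the driver): for an
 * UBLK_F_UNPRIVILEGED_DEV device the command buffer starts with the
 * NUL-terminated char device path and the real payload follows it:
 *
 *   char buf[PATH_MAX + sizeof(struct ublksrv_ctrl_dev_info)];
 *   strcpy(buf, "/dev/ublkc0");
 *   header->dev_path_len = strlen(buf) + 1;
 *   header->addr = (__u64)(uintptr_t)buf;
 *   header->len = header->dev_path_len + sizeof(struct ublksrv_ctrl_dev_info);
 *
 * On success the driver strips the path from addr/len before the
 * per-command handler parses the remaining payload.
 */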
3159
3160 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
3161 unsigned int issue_flags)
3162 {
3163 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
3164 struct ublk_device *ub = NULL;
3165 u32 cmd_op = cmd->cmd_op;
3166 int ret = -EINVAL;
3167
3168 if (issue_flags & IO_URING_F_NONBLOCK)
3169 return -EAGAIN;
3170
3171 ublk_ctrl_cmd_dump(cmd);
3172
3173 if (!(issue_flags & IO_URING_F_SQE128))
3174 goto out;
3175
3176 ret = ublk_check_cmd_op(cmd_op);
3177 if (ret)
3178 goto out;
3179
3180 if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
3181 ret = ublk_ctrl_get_features(cmd);
3182 goto out;
3183 }
3184
3185 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
3186 ret = -ENODEV;
3187 ub = ublk_get_device_from_id(header->dev_id);
3188 if (!ub)
3189 goto out;
3190
3191 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
3192 if (ret)
3193 goto put_dev;
3194 }
3195
3196 switch (_IOC_NR(cmd_op)) {
3197 case UBLK_CMD_START_DEV:
3198 ret = ublk_ctrl_start_dev(ub, cmd);
3199 break;
3200 case UBLK_CMD_STOP_DEV:
3201 ret = ublk_ctrl_stop_dev(ub);
3202 break;
3203 case UBLK_CMD_GET_DEV_INFO:
3204 case UBLK_CMD_GET_DEV_INFO2:
3205 ret = ublk_ctrl_get_dev_info(ub, cmd);
3206 break;
3207 case UBLK_CMD_ADD_DEV:
3208 ret = ublk_ctrl_add_dev(cmd);
3209 break;
3210 case UBLK_CMD_DEL_DEV:
3211 ret = ublk_ctrl_del_dev(&ub, true);
3212 break;
3213 case UBLK_CMD_DEL_DEV_ASYNC:
3214 ret = ublk_ctrl_del_dev(&ub, false);
3215 break;
3216 case UBLK_CMD_GET_QUEUE_AFFINITY:
3217 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
3218 break;
3219 case UBLK_CMD_GET_PARAMS:
3220 ret = ublk_ctrl_get_params(ub, cmd);
3221 break;
3222 case UBLK_CMD_SET_PARAMS:
3223 ret = ublk_ctrl_set_params(ub, cmd);
3224 break;
3225 case UBLK_CMD_START_USER_RECOVERY:
3226 ret = ublk_ctrl_start_recovery(ub, cmd);
3227 break;
3228 case UBLK_CMD_END_USER_RECOVERY:
3229 ret = ublk_ctrl_end_recovery(ub, cmd);
3230 break;
3231 default:
3232 ret = -EOPNOTSUPP;
3233 break;
3234 }
3235
3236 put_dev:
3237 if (ub)
3238 ublk_put_device(ub);
3239 out:
3240 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
3241 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
3242 return ret;
3243 }
3244
3245 static const struct file_operations ublk_ctl_fops = {
3246 .open = nonseekable_open,
3247 .uring_cmd = ublk_ctrl_uring_cmd,
3248 .owner = THIS_MODULE,
3249 .llseek = noop_llseek,
3250 };
3251
3252 static struct miscdevice ublk_misc = {
3253 .minor = MISC_DYNAMIC_MINOR,
3254 .name = "ublk-control",
3255 .fops = &ublk_ctl_fops,
3256 };
3257
3258 static int __init ublk_init(void)
3259 {
3260 int ret;
3261
3262 BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
3263 UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
3264
3265 init_waitqueue_head(&ublk_idr_wq);
3266
3267 ret = misc_register(&ublk_misc);
3268 if (ret)
3269 return ret;
3270
3271 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
3272 if (ret)
3273 goto unregister_mis;
3274
3275 ret = class_register(&ublk_chr_class);
3276 if (ret)
3277 goto free_chrdev_region;
3278
3279 return 0;
3280
3281 free_chrdev_region:
3282 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3283 unregister_mis:
3284 misc_deregister(&ublk_misc);
3285 return ret;
3286 }
3287
3288 static void __exit ublk_exit(void)
3289 {
3290 struct ublk_device *ub;
3291 int id;
3292
3293 idr_for_each_entry(&ublk_index_idr, ub, id)
3294 ublk_remove(ub);
3295
3296 class_unregister(&ublk_chr_class);
3297 misc_deregister(&ublk_misc);
3298
3299 idr_destroy(&ublk_index_idr);
3300 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3301 }
3302
3303 module_init(ublk_init);
3304 module_exit(ublk_exit);
3305
3306 static int ublk_set_max_unprivileged_ublks(const char *buf,
3307 const struct kernel_param *kp)
3308 {
3309 return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
3310 }
3311
3312 static int ublk_get_max_unprivileged_ublks(char *buf,
3313 const struct kernel_param *kp)
3314 {
3315 return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
3316 }
3317
3318 static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
3319 .set = ublk_set_max_unprivileged_ublks,
3320 .get = ublk_get_max_unprivileged_ublks,
3321 };
3322
3323 module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
3324 &unprivileged_ublks_max, 0644);
3325 MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
3326
3327 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
3328 MODULE_DESCRIPTION("Userspace block device");
3329 MODULE_LICENSE("GPL");
3330