1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Userspace block device - block device whose IO is handled in userspace
4 *
5 * Makes full use of io_uring passthrough commands for communicating with
6 * the ublk userspace daemon (ublksrvd) to handle basic IO requests.
7 *
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
9 *
10 * (part of code stolen from loop.c)
11 */
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring/cmd.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
42 #include <linux/mm.h>
43 #include <asm/page.h>
44 #include <linux/task_work.h>
45 #include <linux/namei.h>
46 #include <linux/kref.h>
47 #include <uapi/linux/ublk_cmd.h>
48
49 #define UBLK_MINORS (1U << MINORBITS)
50
51 /* private ioctl command mirror */
52 #define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
53
54 /* All UBLK_F_* have to be included into UBLK_F_ALL */
55 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
56 | UBLK_F_URING_CMD_COMP_IN_TASK \
57 | UBLK_F_NEED_GET_DATA \
58 | UBLK_F_USER_RECOVERY \
59 | UBLK_F_USER_RECOVERY_REISSUE \
60 | UBLK_F_UNPRIVILEGED_DEV \
61 | UBLK_F_CMD_IOCTL_ENCODE \
62 | UBLK_F_USER_COPY \
63 | UBLK_F_ZONED \
64 | UBLK_F_USER_RECOVERY_FAIL_IO)
65
66 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
67 | UBLK_F_USER_RECOVERY_REISSUE \
68 | UBLK_F_USER_RECOVERY_FAIL_IO)
69
70 /* All UBLK_PARAM_TYPE_* should be included here */
71 #define UBLK_PARAM_TYPE_ALL \
72 (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
73 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
74
75 struct ublk_rq_data {
76 struct llist_node node;
77
78 struct kref ref;
79 };
80
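/*
 * Per-command private data stored in the io_uring_cmd pdu area: it records
 * which queue and tag a fetched command belongs to, so the task-work and
 * cancel paths can find the matching ublk_io again.
 */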
81 struct ublk_uring_cmd_pdu {
82 struct ublk_queue *ubq;
83 u16 tag;
84 };
85
86 /*
87 * io command is active: the sqe cmd has been received and its cqe isn't done yet
88 *
89 * If the flag is set, the io command is owned by the ublk driver and is
90 * waiting for an incoming blk-mq request from the ublk block device.
91 *
92 * If the flag is cleared, the io command has been completed and is owned
93 * by the ublk server.
94 */
95 #define UBLK_IO_FLAG_ACTIVE 0x01
96
97 /*
98 * The IO command has been completed via cqe and is being handled by
99 * ublksrv, but not committed yet
100 *
101 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
102 * for cross verification
103 */
104 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
105
106 /*
107 * The IO command has been aborted; this flag is only set in the
108 * !UBLK_IO_FLAG_ACTIVE case.
109 *
110 * After this flag is observed, any pending or new incoming request
111 * associated with this io command will be failed immediately
112 */
113 #define UBLK_IO_FLAG_ABORTED 0x04
114
115 /*
116 * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command requires
117 * getting a data buffer address from ublksrv.
118 *
119 * Bio data can then be copied into this data buffer for a WRITE request
120 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
121 */
122 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
123
124 /* atomic RW with ubq->cancel_lock */
125 #define UBLK_IO_FLAG_CANCELED 0x80000000
126
127 struct ublk_io {
128 /* userspace buffer address from io cmd */
129 __u64 addr;
130 unsigned int flags;
131 int res;
132
133 struct io_uring_cmd *cmd;
134 };
135
136 struct ublk_queue {
137 int q_id;
138 int q_depth;
139
140 unsigned long flags;
141 struct task_struct *ubq_daemon;
142 char *io_cmd_buf;
143
144 struct llist_head io_cmds;
145
146 unsigned long io_addr; /* mapped vm address */
147 unsigned int max_io_sz;
148 bool force_abort;
149 bool timeout;
150 bool canceling;
151 bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
152 unsigned short nr_io_ready; /* how many ios setup */
153 spinlock_t cancel_lock;
154 struct ublk_device *dev;
155 struct ublk_io ios[];
156 };
157
158 struct ublk_device {
159 struct gendisk *ub_disk;
160
161 char *__queues;
162
163 unsigned int queue_size;
164 struct ublksrv_ctrl_dev_info dev_info;
165
166 struct blk_mq_tag_set tag_set;
167
168 struct cdev cdev;
169 struct device cdev_dev;
170
171 #define UB_STATE_OPEN 0
172 #define UB_STATE_USED 1
173 #define UB_STATE_DELETED 2
174 unsigned long state;
175 int ub_number;
176
177 struct mutex mutex;
178
179 spinlock_t lock;
180 struct mm_struct *mm;
181
182 struct ublk_params params;
183
184 struct completion completion;
185 unsigned int nr_queues_ready;
186 unsigned int nr_privileged_daemon;
187
188 struct work_struct nosrv_work;
189 };
190
191 /* header of ublk_params */
192 struct ublk_params_header {
193 __u32 len;
194 __u32 types;
195 };
196
197 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
198
199 static inline unsigned int ublk_req_build_flags(struct request *req);
200 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
201 int tag);
202 static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
203 {
204 return ub->dev_info.flags & UBLK_F_USER_COPY;
205 }
206
207 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
208 {
209 return ub->dev_info.flags & UBLK_F_ZONED;
210 }
211
212 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
213 {
214 return ubq->flags & UBLK_F_ZONED;
215 }
216
217 #ifdef CONFIG_BLK_DEV_ZONED
218
219 struct ublk_zoned_report_desc {
220 __u64 sector;
221 __u32 operation;
222 __u32 nr_zones;
223 };
224
225 static DEFINE_XARRAY(ublk_zoned_report_descs);
226
227 static int ublk_zoned_insert_report_desc(const struct request *req,
228 struct ublk_zoned_report_desc *desc)
229 {
230 return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
231 desc, GFP_KERNEL);
232 }
233
234 static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
235 const struct request *req)
236 {
237 return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
238 }
239
240 static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
241 const struct request *req)
242 {
243 return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
244 }
245
246 static int ublk_get_nr_zones(const struct ublk_device *ub)
247 {
248 const struct ublk_param_basic *p = &ub->params.basic;
249
250 /* Zone size is a power of 2 */
251 return p->dev_sectors >> ilog2(p->chunk_sectors);
252 }
253
254 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
255 {
256 return blk_revalidate_disk_zones(ub->ub_disk);
257 }
258
259 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
260 {
261 const struct ublk_param_zoned *p = &ub->params.zoned;
262 int nr_zones;
263
264 if (!ublk_dev_is_zoned(ub))
265 return -EINVAL;
266
267 if (!p->max_zone_append_sectors)
268 return -EINVAL;
269
270 nr_zones = ublk_get_nr_zones(ub);
271
272 if (p->max_active_zones > nr_zones)
273 return -EINVAL;
274
275 if (p->max_open_zones > nr_zones)
276 return -EINVAL;
277
278 return 0;
279 }
280
281 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
282 {
283 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
284 }
285
286 /* Based on virtblk_alloc_report_buffer */
287 static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
288 unsigned int nr_zones, size_t *buflen)
289 {
290 struct request_queue *q = ublk->ub_disk->queue;
291 size_t bufsize;
292 void *buf;
293
294 nr_zones = min_t(unsigned int, nr_zones,
295 ublk->ub_disk->nr_zones);
296
297 bufsize = nr_zones * sizeof(struct blk_zone);
298 bufsize =
299 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
300
301 while (bufsize >= sizeof(struct blk_zone)) {
302 buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
303 if (buf) {
304 *buflen = bufsize;
305 return buf;
306 }
307 bufsize >>= 1;
308 }
309
310 *buflen = 0;
311 return NULL;
312 }
313
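/*
 * Zone reporting is forwarded to the ublk server as driver-private
 * (REQ_OP_DRV_IN) requests carrying a UBLK_IO_OP_REPORT_ZONES descriptor.
 * The server fills the mapped kernel buffer with struct blk_zone entries,
 * which are then fed to the report_zones callback below.
 */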
314 static int ublk_report_zones(struct gendisk *disk, sector_t sector,
315 unsigned int nr_zones, report_zones_cb cb, void *data)
316 {
317 struct ublk_device *ub = disk->private_data;
318 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
319 unsigned int first_zone = sector >> ilog2(zone_size_sectors);
320 unsigned int done_zones = 0;
321 unsigned int max_zones_per_request;
322 int ret;
323 struct blk_zone *buffer;
324 size_t buffer_length;
325
326 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
327 nr_zones);
328
329 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
330 if (!buffer)
331 return -ENOMEM;
332
333 max_zones_per_request = buffer_length / sizeof(struct blk_zone);
334
335 while (done_zones < nr_zones) {
336 unsigned int remaining_zones = nr_zones - done_zones;
337 unsigned int zones_in_request =
338 min_t(unsigned int, remaining_zones, max_zones_per_request);
339 struct request *req;
340 struct ublk_zoned_report_desc desc;
341 blk_status_t status;
342
343 memset(buffer, 0, buffer_length);
344
345 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
346 if (IS_ERR(req)) {
347 ret = PTR_ERR(req);
348 goto out;
349 }
350
351 desc.operation = UBLK_IO_OP_REPORT_ZONES;
352 desc.sector = sector;
353 desc.nr_zones = zones_in_request;
354 ret = ublk_zoned_insert_report_desc(req, &desc);
355 if (ret)
356 goto free_req;
357
358 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
359 GFP_KERNEL);
360 if (ret)
361 goto erase_desc;
362
363 status = blk_execute_rq(req, 0);
364 ret = blk_status_to_errno(status);
365 erase_desc:
366 ublk_zoned_erase_report_desc(req);
367 free_req:
368 blk_mq_free_request(req);
369 if (ret)
370 goto out;
371
372 for (unsigned int i = 0; i < zones_in_request; i++) {
373 struct blk_zone *zone = buffer + i;
374
375 /* A zero length zone means no more zones in this response */
376 if (!zone->len)
377 break;
378
379 ret = cb(zone, i, data);
380 if (ret)
381 goto out;
382
383 done_zones++;
384 sector += zone_size_sectors;
385
386 }
387 }
388
389 ret = done_zones;
390
391 out:
392 kvfree(buffer);
393 return ret;
394 }
395
396 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
397 struct request *req)
398 {
399 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
400 struct ublk_io *io = &ubq->ios[req->tag];
401 struct ublk_zoned_report_desc *desc;
402 u32 ublk_op;
403
404 switch (req_op(req)) {
405 case REQ_OP_ZONE_OPEN:
406 ublk_op = UBLK_IO_OP_ZONE_OPEN;
407 break;
408 case REQ_OP_ZONE_CLOSE:
409 ublk_op = UBLK_IO_OP_ZONE_CLOSE;
410 break;
411 case REQ_OP_ZONE_FINISH:
412 ublk_op = UBLK_IO_OP_ZONE_FINISH;
413 break;
414 case REQ_OP_ZONE_RESET:
415 ublk_op = UBLK_IO_OP_ZONE_RESET;
416 break;
417 case REQ_OP_ZONE_APPEND:
418 ublk_op = UBLK_IO_OP_ZONE_APPEND;
419 break;
420 case REQ_OP_ZONE_RESET_ALL:
421 ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
422 break;
423 case REQ_OP_DRV_IN:
424 desc = ublk_zoned_get_report_desc(req);
425 if (!desc)
426 return BLK_STS_IOERR;
427 ublk_op = desc->operation;
428 switch (ublk_op) {
429 case UBLK_IO_OP_REPORT_ZONES:
430 iod->op_flags = ublk_op | ublk_req_build_flags(req);
431 iod->nr_zones = desc->nr_zones;
432 iod->start_sector = desc->sector;
433 return BLK_STS_OK;
434 default:
435 return BLK_STS_IOERR;
436 }
437 case REQ_OP_DRV_OUT:
438 /* We do not support drv_out */
439 return BLK_STS_NOTSUPP;
440 default:
441 return BLK_STS_IOERR;
442 }
443
444 iod->op_flags = ublk_op | ublk_req_build_flags(req);
445 iod->nr_sectors = blk_rq_sectors(req);
446 iod->start_sector = blk_rq_pos(req);
447 iod->addr = io->addr;
448
449 return BLK_STS_OK;
450 }
451
452 #else
453
454 #define ublk_report_zones (NULL)
455
456 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
457 {
458 return -EOPNOTSUPP;
459 }
460
461 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
462 {
463 }
464
465 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
466 {
467 return 0;
468 }
469
470 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
471 struct request *req)
472 {
473 return BLK_STS_NOTSUPP;
474 }
475
476 #endif
477
478 static inline void __ublk_complete_rq(struct request *req);
479 static void ublk_complete_rq(struct kref *ref);
480
481 static dev_t ublk_chr_devt;
482 static const struct class ublk_chr_class = {
483 .name = "ublk-char",
484 };
485
486 static DEFINE_IDR(ublk_index_idr);
487 static DEFINE_SPINLOCK(ublk_idr_lock);
488 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
489
490 static DEFINE_MUTEX(ublk_ctl_mutex);
491
492 /*
493 * Maximum number of ublk devices allowed to be added
494 *
495 * It can be extended to a per-user limit in the future, or even be
496 * controlled by cgroup.
497 */
498 #define UBLK_MAX_UBLKS UBLK_MINORS
499 static unsigned int ublks_max = 64;
500 static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
501
502 static struct miscdevice ublk_misc;
503
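/*
 * With UBLK_F_USER_COPY, io data is transferred via pread()/pwrite() on the
 * ublk char device. File positions above UBLKSRV_IO_BUF_OFFSET encode the
 * hw queue id, the tag and the offset inside the io buffer; the helpers
 * below extract the individual fields.
 */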
504 static inline unsigned ublk_pos_to_hwq(loff_t pos)
505 {
506 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
507 UBLK_QID_BITS_MASK;
508 }
509
510 static inline unsigned ublk_pos_to_buf_off(loff_t pos)
511 {
512 return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
513 }
514
515 static inline unsigned ublk_pos_to_tag(loff_t pos)
516 {
517 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
518 UBLK_TAG_BITS_MASK;
519 }
520
521 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
522 {
523 const struct ublk_param_basic *p = &ub->params.basic;
524
525 if (p->attrs & UBLK_ATTR_READ_ONLY)
526 set_disk_ro(ub->ub_disk, true);
527
528 set_capacity(ub->ub_disk, p->dev_sectors);
529 }
530
531 static int ublk_validate_params(const struct ublk_device *ub)
532 {
533 /* basic param is the only one which must be set */
534 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
535 const struct ublk_param_basic *p = &ub->params.basic;
536
537 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
538 return -EINVAL;
539
540 if (p->logical_bs_shift > p->physical_bs_shift)
541 return -EINVAL;
542
543 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
544 return -EINVAL;
545
546 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
547 return -EINVAL;
548 } else
549 return -EINVAL;
550
551 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
552 const struct ublk_param_discard *p = &ub->params.discard;
553
554 /* So far, only support single segment discard */
555 if (p->max_discard_sectors && p->max_discard_segments != 1)
556 return -EINVAL;
557
558 if (!p->discard_granularity)
559 return -EINVAL;
560 }
561
562 /* dev_t is read-only */
563 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
564 return -EINVAL;
565
566 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
567 return ublk_dev_param_zoned_validate(ub);
568 else if (ublk_dev_is_zoned(ub))
569 return -EINVAL;
570
571 return 0;
572 }
573
574 static void ublk_apply_params(struct ublk_device *ub)
575 {
576 ublk_dev_param_basic_apply(ub);
577
578 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
579 ublk_dev_param_zoned_apply(ub);
580 }
581
582 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
583 {
584 return ubq->flags & UBLK_F_USER_COPY;
585 }
586
587 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
588 {
589 /*
590 * user copy goes through read()/write(), so a request reference
591 * has to be grabbed
592 */
593 return ublk_support_user_copy(ubq);
594 }
595
596 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
597 struct request *req)
598 {
599 if (ublk_need_req_ref(ubq)) {
600 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
601
602 kref_init(&data->ref);
603 }
604 }
605
606 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
607 struct request *req)
608 {
609 if (ublk_need_req_ref(ubq)) {
610 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
611
612 return kref_get_unless_zero(&data->ref);
613 }
614
615 return true;
616 }
617
618 static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
619 struct request *req)
620 {
621 if (ublk_need_req_ref(ubq)) {
622 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
623
624 kref_put(&data->ref, ublk_complete_rq);
625 } else {
626 __ublk_complete_rq(req);
627 }
628 }
629
630 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
631 {
632 return ubq->flags & UBLK_F_NEED_GET_DATA;
633 }
634
635 /* Called in the slow path only, keep it noinline for tracing purposes */
636 static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
637 {
638 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
639 return ub;
640 return NULL;
641 }
642
643 /* Called in the slow path only, keep it noinline for tracing purposes */
644 static noinline void ublk_put_device(struct ublk_device *ub)
645 {
646 put_device(&ub->cdev_dev);
647 }
648
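/*
 * All queues are allocated back to back in ub->__queues, each taking
 * ub->queue_size bytes, so a queue is located by simple pointer arithmetic.
 */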
649 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
650 int qid)
651 {
652 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
653 }
654
655 static inline bool ublk_rq_has_data(const struct request *rq)
656 {
657 return bio_has_data(rq->bio);
658 }
659
660 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
661 int tag)
662 {
663 return (struct ublksrv_io_desc *)
664 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
665 }
666
667 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
668 {
669 return ublk_get_queue(ub, q_id)->io_cmd_buf;
670 }
671
672 static inline int __ublk_queue_cmd_buf_size(int depth)
673 {
674 return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
675 }
676
677 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
678 {
679 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
680
681 return __ublk_queue_cmd_buf_size(ubq->q_depth);
682 }
683
684 static int ublk_max_cmd_buf_size(void)
685 {
686 return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
687 }
688
689 /*
690 * Should I/O outstanding to the ublk server when it exits be reissued?
691 * If not, outstanding I/O will get errors.
692 */
693 static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
694 {
695 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
696 (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
697 }
698
699 /*
700 * Should I/O issued while there is no ublk server be queued? If not, I/O
701 * issued while there is no ublk server will get errors.
702 */
703 static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
704 {
705 return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
706 !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
707 }
708
709 /*
710 * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
711 * of the device flags for smaller cache footprint - better for fast
712 * paths.
713 */
714 static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
715 {
716 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
717 !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
718 }
719
720 /*
721 * Should ublk devices be stopped (i.e. no recovery possible) when the
722 * ublk server exits? If not, devices can be used again by a future
723 * incarnation of a ublk server via the start_recovery/end_recovery
724 * commands.
725 */
726 static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
727 {
728 return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
729 }
730
731 static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
732 {
733 return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
734 ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
735 }
736
737 static void ublk_free_disk(struct gendisk *disk)
738 {
739 struct ublk_device *ub = disk->private_data;
740
741 clear_bit(UB_STATE_USED, &ub->state);
742 ublk_put_device(ub);
743 }
744
745 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
746 unsigned int *owner_gid)
747 {
748 kuid_t uid;
749 kgid_t gid;
750
751 current_uid_gid(&uid, &gid);
752
753 *owner_uid = from_kuid(&init_user_ns, uid);
754 *owner_gid = from_kgid(&init_user_ns, gid);
755 }
756
757 static int ublk_open(struct gendisk *disk, blk_mode_t mode)
758 {
759 struct ublk_device *ub = disk->private_data;
760
761 if (capable(CAP_SYS_ADMIN))
762 return 0;
763
764 /*
765 * If it is an unprivileged device, only the owner can open
766 * the disk. Otherwise it could be a trap set by a malicious
767 * user who deliberately grants this disk's privileges to
768 * other users.
769 *
770 * This policy is also reasonable given that anyone can create
771 * an unprivileged device without needing anyone else's grant.
772 */
773 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
774 unsigned int curr_uid, curr_gid;
775
776 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
777
778 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
779 ub->dev_info.owner_gid)
780 return -EPERM;
781 }
782
783 return 0;
784 }
785
786 static const struct block_device_operations ub_fops = {
787 .owner = THIS_MODULE,
788 .open = ublk_open,
789 .free_disk = ublk_free_disk,
790 .report_zones = ublk_report_zones,
791 };
792
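/*
 * Helper state for copying data between the ublk server's buffer and the
 * request's bio pages: up to UBLK_MAX_PIN_PAGES user pages are pinned at a
 * time while walking the request's bio/bvec chain.
 */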
793 #define UBLK_MAX_PIN_PAGES 32
794
795 struct ublk_io_iter {
796 struct page *pages[UBLK_MAX_PIN_PAGES];
797 struct bio *bio;
798 struct bvec_iter iter;
799 };
800
801 /* copy data between the request's bio pages and the pinned user pages */
802 static void ublk_copy_io_pages(struct ublk_io_iter *data,
803 size_t total, size_t pg_off, int dir)
804 {
805 unsigned done = 0;
806 unsigned pg_idx = 0;
807
808 while (done < total) {
809 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
810 unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
811 (unsigned)(PAGE_SIZE - pg_off));
812 void *bv_buf = bvec_kmap_local(&bv);
813 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
814
815 if (dir == ITER_DEST)
816 memcpy(pg_buf + pg_off, bv_buf, bytes);
817 else
818 memcpy(bv_buf, pg_buf + pg_off, bytes);
819
820 kunmap_local(pg_buf);
821 kunmap_local(bv_buf);
822
823 /* advance page array */
824 pg_off += bytes;
825 if (pg_off == PAGE_SIZE) {
826 pg_idx += 1;
827 pg_off = 0;
828 }
829
830 done += bytes;
831
832 /* advance bio */
833 bio_advance_iter_single(data->bio, &data->iter, bytes);
834 if (!data->iter.bi_size) {
835 data->bio = data->bio->bi_next;
836 if (data->bio == NULL)
837 break;
838 data->iter = data->bio->bi_iter;
839 }
840 }
841 }
842
843 static bool ublk_advance_io_iter(const struct request *req,
844 struct ublk_io_iter *iter, unsigned int offset)
845 {
846 struct bio *bio = req->bio;
847
848 for_each_bio(bio) {
849 if (bio->bi_iter.bi_size > offset) {
850 iter->bio = bio;
851 iter->iter = bio->bi_iter;
852 bio_advance_iter(iter->bio, &iter->iter, offset);
853 return true;
854 }
855 offset -= bio->bi_iter.bi_size;
856 }
857 return false;
858 }
859
860 /*
861 * Copy data between request pages and the user iov_iter; 'offset'
862 * is the linear offset into the request where copying starts.
863 */
864 static size_t ublk_copy_user_pages(const struct request *req,
865 unsigned offset, struct iov_iter *uiter, int dir)
866 {
867 struct ublk_io_iter iter;
868 size_t done = 0;
869
870 if (!ublk_advance_io_iter(req, &iter, offset))
871 return 0;
872
873 while (iov_iter_count(uiter) && iter.bio) {
874 unsigned nr_pages;
875 ssize_t len;
876 size_t off;
877 int i;
878
879 len = iov_iter_get_pages2(uiter, iter.pages,
880 iov_iter_count(uiter),
881 UBLK_MAX_PIN_PAGES, &off);
882 if (len <= 0)
883 return done;
884
885 ublk_copy_io_pages(&iter, len, off, dir);
886 nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
887 for (i = 0; i < nr_pages; i++) {
888 if (dir == ITER_DEST)
889 set_page_dirty(iter.pages[i]);
890 put_page(iter.pages[i]);
891 }
892 done += len;
893 }
894
895 return done;
896 }
897
898 static inline bool ublk_need_map_req(const struct request *req)
899 {
900 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
901 }
902
903 static inline bool ublk_need_unmap_req(const struct request *req)
904 {
905 return ublk_rq_has_data(req) &&
906 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
907 }
908
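/*
 * Without zero copy, WRITE data is copied from the request pages into the
 * server buffer before the io command is completed to ublksrv (map), and
 * READ data is copied back from the server buffer once the server commits
 * its result (unmap). With UBLK_F_USER_COPY the server moves the data
 * itself, so both helpers just return the request size.
 */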
909 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
910 struct ublk_io *io)
911 {
912 const unsigned int rq_bytes = blk_rq_bytes(req);
913
914 if (ublk_support_user_copy(ubq))
915 return rq_bytes;
916
917 /*
918 * no zero copy: we delay copying WRITE request data into the ublksrv
919 * context, and the big benefit is that pinning pages in the current
920 * context is pretty fast, see ublk_copy_user_pages
921 */
922 if (ublk_need_map_req(req)) {
923 struct iov_iter iter;
924 const int dir = ITER_DEST;
925
926 import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
927 return ublk_copy_user_pages(req, 0, &iter, dir);
928 }
929 return rq_bytes;
930 }
931
932 static int ublk_unmap_io(const struct ublk_queue *ubq,
933 const struct request *req,
934 struct ublk_io *io)
935 {
936 const unsigned int rq_bytes = blk_rq_bytes(req);
937
938 if (ublk_support_user_copy(ubq))
939 return rq_bytes;
940
941 if (ublk_need_unmap_req(req)) {
942 struct iov_iter iter;
943 const int dir = ITER_SOURCE;
944
945 WARN_ON_ONCE(io->res > rq_bytes);
946
947 import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
948 return ublk_copy_user_pages(req, 0, &iter, dir);
949 }
950 return rq_bytes;
951 }
952
953 static inline unsigned int ublk_req_build_flags(struct request *req)
954 {
955 unsigned flags = 0;
956
957 if (req->cmd_flags & REQ_FAILFAST_DEV)
958 flags |= UBLK_IO_F_FAILFAST_DEV;
959
960 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
961 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
962
963 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
964 flags |= UBLK_IO_F_FAILFAST_DRIVER;
965
966 if (req->cmd_flags & REQ_META)
967 flags |= UBLK_IO_F_META;
968
969 if (req->cmd_flags & REQ_FUA)
970 flags |= UBLK_IO_F_FUA;
971
972 if (req->cmd_flags & REQ_NOUNMAP)
973 flags |= UBLK_IO_F_NOUNMAP;
974
975 if (req->cmd_flags & REQ_SWAP)
976 flags |= UBLK_IO_F_SWAP;
977
978 return flags;
979 }
980
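/*
 * Build the io descriptor for this request in the per-queue command buffer
 * that ublksrv has mmap()ed, translating the block layer opcode and flags
 * into UBLK_IO_OP_* and UBLK_IO_F_* values.
 */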
981 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
982 {
983 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
984 struct ublk_io *io = &ubq->ios[req->tag];
985 enum req_op op = req_op(req);
986 u32 ublk_op;
987
988 if (!ublk_queue_is_zoned(ubq) &&
989 (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
990 return BLK_STS_IOERR;
991
992 switch (req_op(req)) {
993 case REQ_OP_READ:
994 ublk_op = UBLK_IO_OP_READ;
995 break;
996 case REQ_OP_WRITE:
997 ublk_op = UBLK_IO_OP_WRITE;
998 break;
999 case REQ_OP_FLUSH:
1000 ublk_op = UBLK_IO_OP_FLUSH;
1001 break;
1002 case REQ_OP_DISCARD:
1003 ublk_op = UBLK_IO_OP_DISCARD;
1004 break;
1005 case REQ_OP_WRITE_ZEROES:
1006 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
1007 break;
1008 default:
1009 if (ublk_queue_is_zoned(ubq))
1010 return ublk_setup_iod_zoned(ubq, req);
1011 return BLK_STS_IOERR;
1012 }
1013
1014 /* need to translate since the kernel's internal op/flag values may change */
1015 iod->op_flags = ublk_op | ublk_req_build_flags(req);
1016 iod->nr_sectors = blk_rq_sectors(req);
1017 iod->start_sector = blk_rq_pos(req);
1018 iod->addr = io->addr;
1019
1020 return BLK_STS_OK;
1021 }
1022
1023 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
1024 struct io_uring_cmd *ioucmd)
1025 {
1026 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
1027 }
1028
1029 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
1030 {
1031 return ubq->ubq_daemon->flags & PF_EXITING;
1032 }
1033
1034 /* todo: handle partial completion */
1035 static inline void __ublk_complete_rq(struct request *req)
1036 {
1037 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1038 struct ublk_io *io = &ubq->ios[req->tag];
1039 unsigned int unmapped_bytes;
1040 blk_status_t res = BLK_STS_OK;
1041
1042 /* called from ublk_abort_queue() code path */
1043 if (io->flags & UBLK_IO_FLAG_ABORTED) {
1044 res = BLK_STS_IOERR;
1045 goto exit;
1046 }
1047
1048 /* failed read IO if nothing is read */
1049 if (!io->res && req_op(req) == REQ_OP_READ)
1050 io->res = -EIO;
1051
1052 if (io->res < 0) {
1053 res = errno_to_blk_status(io->res);
1054 goto exit;
1055 }
1056
1057 /*
1058 * FLUSH, DISCARD or WRITE_ZEROES usually won't return a valid byte count,
1059 * so end them directly.
1060 *
1061 * None of them needs unmapping.
1062 */
1063 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1064 req_op(req) != REQ_OP_DRV_IN)
1065 goto exit;
1066
1067 /* for READ request, writing data in iod->addr to rq buffers */
1068 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1069
1070 /*
1071 * Extremely unlikely since the data was filled in just before
1072 *
1073 * Re-check simply for this unlikely case.
1074 */
1075 if (unlikely(unmapped_bytes < io->res))
1076 io->res = unmapped_bytes;
1077
1078 if (blk_update_request(req, BLK_STS_OK, io->res))
1079 blk_mq_requeue_request(req, true);
1080 else
1081 __blk_mq_end_request(req, BLK_STS_OK);
1082
1083 return;
1084 exit:
1085 blk_mq_end_request(req, res);
1086 }
1087
1088 static void ublk_complete_rq(struct kref *ref)
1089 {
1090 struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
1091 ref);
1092 struct request *req = blk_mq_rq_from_pdu(data);
1093
1094 __ublk_complete_rq(req);
1095 }
1096
1097 /*
1098 * Since __ublk_rq_task_work() always fails requests immediately during
1099 * exiting, __ublk_fail_req() is only called from the abort context during
1100 * exiting, so no lock is needed.
1101 *
1102 * Also, aborting may not have started yet; keep in mind that one failed
1103 * request may be issued by the block layer again.
1104 */
1105 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1106 struct request *req)
1107 {
1108 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1109
1110 if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
1111 blk_mq_requeue_request(req, false);
1112 else
1113 ublk_put_req_ref(ubq, req);
1114 }
1115
1116 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
1117 unsigned issue_flags)
1118 {
1119 /* mark this cmd owned by ublksrv */
1120 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1121
1122 /*
1123 * clear ACTIVE since we are done with this sqe/cmd slot
1124 * We can only accept io cmd in case of being not active.
1125 */
1126 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1127
1128 /* tell ublksrv one io request is coming */
1129 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
1130 }
1131
1132 #define UBLK_REQUEUE_DELAY_MS 3
1133
1134 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1135 struct request *rq)
1136 {
1137 /* We cannot process this rq so just requeue it. */
1138 if (ublk_nosrv_dev_should_queue_io(ubq->dev))
1139 blk_mq_requeue_request(rq, false);
1140 else
1141 blk_mq_end_request(rq, BLK_STS_IOERR);
1142 }
1143
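/*
 * Runs in the ubq_daemon (io_uring) task context: handles the optional
 * NEED_GET_DATA round trip, copies WRITE data into the server buffer, and
 * finally completes the fetched uring_cmd with UBLK_IO_RES_OK so that
 * ublksrv starts handling the request.
 */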
1144 static inline void __ublk_rq_task_work(struct request *req,
1145 unsigned issue_flags)
1146 {
1147 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1148 int tag = req->tag;
1149 struct ublk_io *io = &ubq->ios[tag];
1150 unsigned int mapped_bytes;
1151
1152 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1153 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1154 ublk_get_iod(ubq, req->tag)->addr);
1155
1156 /*
1157 * Task is exiting if either:
1158 *
1159 * (1) current != ubq_daemon.
1160 * io_uring_cmd_complete_in_task() tries to run task_work
1161 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1162 *
1163 * (2) current->flags & PF_EXITING.
1164 */
1165 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1166 __ublk_abort_rq(ubq, req);
1167 return;
1168 }
1169
1170 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1171 /*
1172 * We have not handled the UBLK_IO_NEED_GET_DATA command yet,
1173 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1174 * and notify it.
1175 */
1176 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
1177 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1178 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1179 __func__, io->cmd->cmd_op, ubq->q_id,
1180 req->tag, io->flags);
1181 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
1182 return;
1183 }
1184 /*
1185 * We have handled UBLK_IO_NEED_GET_DATA command,
1186 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1187 * do the copy work.
1188 */
1189 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
1190 /* update iod->addr because ublksrv may have passed a new io buffer */
1191 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1192 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1193 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1194 ublk_get_iod(ubq, req->tag)->addr);
1195 }
1196
1197 mapped_bytes = ublk_map_io(ubq, req, io);
1198
1199 /* partially mapped, update io descriptor */
1200 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1201 /*
1202 * Nothing mapped, retry until we succeed.
1203 *
1204 * We may never succeed in mapping any bytes here because
1205 * of OOM. TODO: reserve one buffer with single page pinned
1206 * for providing forward progress guarantee.
1207 */
1208 if (unlikely(!mapped_bytes)) {
1209 blk_mq_requeue_request(req, false);
1210 blk_mq_delay_kick_requeue_list(req->q,
1211 UBLK_REQUEUE_DELAY_MS);
1212 return;
1213 }
1214
1215 ublk_get_iod(ubq, req->tag)->nr_sectors =
1216 mapped_bytes >> 9;
1217 }
1218
1219 ublk_init_req_ref(ubq, req);
1220 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
1221 }
1222
1223 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1224 unsigned issue_flags)
1225 {
1226 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1227 struct ublk_rq_data *data, *tmp;
1228
1229 io_cmds = llist_reverse_order(io_cmds);
1230 llist_for_each_entry_safe(data, tmp, io_cmds, node)
1231 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
1232 }
1233
1234 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1235 {
1236 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1237 struct ublk_queue *ubq = pdu->ubq;
1238
1239 ublk_forward_io_cmds(ubq, issue_flags);
1240 }
1241
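/*
 * Queue a request for the daemon: requests are staged on the per-queue
 * lockless list, and only the caller that finds the list empty (llist_add()
 * returns true) schedules task work, which later drains the whole list via
 * ublk_forward_io_cmds().
 */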
1242 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1243 {
1244 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
1245
1246 if (llist_add(&data->node, &ubq->io_cmds)) {
1247 struct ublk_io *io = &ubq->ios[rq->tag];
1248
1249 io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
1250 }
1251 }
1252
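/*
 * Request timeout handling: for an unprivileged device the daemon is killed
 * on the first timeout; otherwise the timer is simply reset unless the
 * daemon is exiting and every io command is already in flight, in which
 * case the queue is aborted and nosrv_work is scheduled.
 */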
1253 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1254 {
1255 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1256 unsigned int nr_inflight = 0;
1257 int i;
1258
1259 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1260 if (!ubq->timeout) {
1261 send_sig(SIGKILL, ubq->ubq_daemon, 0);
1262 ubq->timeout = true;
1263 }
1264
1265 return BLK_EH_DONE;
1266 }
1267
1268 if (!ubq_daemon_is_dying(ubq))
1269 return BLK_EH_RESET_TIMER;
1270
1271 for (i = 0; i < ubq->q_depth; i++) {
1272 struct ublk_io *io = &ubq->ios[i];
1273
1274 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1275 nr_inflight++;
1276 }
1277
1278 /* cancelable uring_cmd can't help us if all commands are in-flight */
1279 if (nr_inflight == ubq->q_depth) {
1280 struct ublk_device *ub = ubq->dev;
1281
1282 if (ublk_abort_requests(ub, ubq)) {
1283 schedule_work(&ub->nosrv_work);
1284 }
1285 return BLK_EH_DONE;
1286 }
1287
1288 return BLK_EH_RESET_TIMER;
1289 }
1290
1291 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1292 const struct blk_mq_queue_data *bd)
1293 {
1294 struct ublk_queue *ubq = hctx->driver_data;
1295 struct request *rq = bd->rq;
1296 blk_status_t res;
1297
1298 if (unlikely(ubq->fail_io)) {
1299 return BLK_STS_TARGET;
1300 }
1301
1302 /* fill iod to slot in io cmd buffer */
1303 res = ublk_setup_iod(ubq, rq);
1304 if (unlikely(res != BLK_STS_OK))
1305 return BLK_STS_IOERR;
1306
1307 /* With the recovery feature enabled, force_abort is set in
1308 * ublk_stop_dev() before calling del_gendisk(). We have to
1309 * abort all requeued and new rqs here to let del_gendisk()
1310 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
1311 * here, to avoid UAF on the io_uring ctx.
1312 *
1313 * Note: force_abort is guaranteed to be seen because it is set
1314 * before the request queue is unquiesced.
1315 */
1316 if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
1317 return BLK_STS_IOERR;
1318
1319 if (unlikely(ubq->canceling)) {
1320 __ublk_abort_rq(ubq, rq);
1321 return BLK_STS_OK;
1322 }
1323
1324 blk_mq_start_request(bd->rq);
1325 ublk_queue_cmd(ubq, rq);
1326
1327 return BLK_STS_OK;
1328 }
1329
1330 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1331 unsigned int hctx_idx)
1332 {
1333 struct ublk_device *ub = driver_data;
1334 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1335
1336 hctx->driver_data = ubq;
1337 return 0;
1338 }
1339
1340 static const struct blk_mq_ops ublk_mq_ops = {
1341 .queue_rq = ublk_queue_rq,
1342 .init_hctx = ublk_init_hctx,
1343 .timeout = ublk_timeout,
1344 };
1345
1346 static int ublk_ch_open(struct inode *inode, struct file *filp)
1347 {
1348 struct ublk_device *ub = container_of(inode->i_cdev,
1349 struct ublk_device, cdev);
1350
1351 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1352 return -EBUSY;
1353 filp->private_data = ub;
1354 return 0;
1355 }
1356
1357 static int ublk_ch_release(struct inode *inode, struct file *filp)
1358 {
1359 struct ublk_device *ub = filp->private_data;
1360
1361 clear_bit(UB_STATE_OPEN, &ub->state);
1362 return 0;
1363 }
1364
1365 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1366 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1367 {
1368 struct ublk_device *ub = filp->private_data;
1369 size_t sz = vma->vm_end - vma->vm_start;
1370 unsigned max_sz = ublk_max_cmd_buf_size();
1371 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1372 int q_id, ret = 0;
1373
1374 spin_lock(&ub->lock);
1375 if (!ub->mm)
1376 ub->mm = current->mm;
1377 if (current->mm != ub->mm)
1378 ret = -EINVAL;
1379 spin_unlock(&ub->lock);
1380
1381 if (ret)
1382 return ret;
1383
1384 if (vma->vm_flags & VM_WRITE)
1385 return -EPERM;
1386
1387 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1388 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1389 return -EINVAL;
1390
1391 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1392 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1393 __func__, q_id, current->pid, vma->vm_start,
1394 phys_off, (unsigned long)sz);
1395
1396 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1397 return -EINVAL;
1398
1399 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1400 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1401 }
1402
1403 static void ublk_commit_completion(struct ublk_device *ub,
1404 const struct ublksrv_io_cmd *ub_cmd)
1405 {
1406 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1407 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1408 struct ublk_io *io = &ubq->ios[tag];
1409 struct request *req;
1410
1411 /* now this cmd slot is owned by the ublk driver */
1412 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1413 io->res = ub_cmd->result;
1414
1415 /* find the io request and complete */
1416 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1417 if (WARN_ON_ONCE(unlikely(!req)))
1418 return;
1419
1420 if (req_op(req) == REQ_OP_ZONE_APPEND)
1421 req->__sector = ub_cmd->zone_append_lba;
1422
1423 if (likely(!blk_should_fake_timeout(req->q)))
1424 ublk_put_req_ref(ubq, req);
1425 }
1426
1427 /*
1428 * Called from the ubq_daemon context via the cancel fn, with the ublk
1429 * blk-mq queue quiesced in the meantime, so we run exclusively with both
1430 * blk-mq and the ubq_daemon context, and everything is serialized.
1431 */
1432 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1433 {
1434 int i;
1435
1436 for (i = 0; i < ubq->q_depth; i++) {
1437 struct ublk_io *io = &ubq->ios[i];
1438
1439 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1440 struct request *rq;
1441
1442 /*
1443 * Either we fail the request or __ublk_rq_task_work()
1444 * will do it
1445 */
1446 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1447 if (rq && blk_mq_request_started(rq)) {
1448 io->flags |= UBLK_IO_FLAG_ABORTED;
1449 __ublk_fail_req(ubq, io, rq);
1450 }
1451 }
1452 }
1453 }
1454
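/*
 * Mark the queue as canceling under cancel_lock, then quiesce the blk-mq
 * queue so started requests can be aborted without racing ublk_queue_rq().
 * Returns true only for the first caller on a live disk, which then
 * schedules nosrv_work.
 */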
1455 static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
1456 {
1457 struct gendisk *disk;
1458
1459 spin_lock(&ubq->cancel_lock);
1460 if (ubq->canceling) {
1461 spin_unlock(&ubq->cancel_lock);
1462 return false;
1463 }
1464 ubq->canceling = true;
1465 spin_unlock(&ubq->cancel_lock);
1466
1467 spin_lock(&ub->lock);
1468 disk = ub->ub_disk;
1469 if (disk)
1470 get_device(disk_to_dev(disk));
1471 spin_unlock(&ub->lock);
1472
1473 /* Our disk is already dead */
1474 if (!disk)
1475 return false;
1476
1477 /* Now we are serialized with ublk_queue_rq() */
1478 blk_mq_quiesce_queue(disk->queue);
1479 /* abort queue is for making forward progress */
1480 ublk_abort_queue(ub, ubq);
1481 blk_mq_unquiesce_queue(disk->queue);
1482 put_device(disk_to_dev(disk));
1483
1484 return true;
1485 }
1486
1487 static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
1488 unsigned int issue_flags)
1489 {
1490 bool done;
1491
1492 if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1493 return;
1494
1495 spin_lock(&ubq->cancel_lock);
1496 done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
1497 if (!done)
1498 io->flags |= UBLK_IO_FLAG_CANCELED;
1499 spin_unlock(&ubq->cancel_lock);
1500
1501 if (!done)
1502 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
1503 }
1504
1505 /*
1506 * The ublk char device won't be closed when calling cancel fn, so both
1507 * ublk device and queue are guaranteed to be live
1508 */
1509 static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
1510 unsigned int issue_flags)
1511 {
1512 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1513 struct ublk_queue *ubq = pdu->ubq;
1514 struct task_struct *task;
1515 struct ublk_device *ub;
1516 bool need_schedule;
1517 struct ublk_io *io;
1518
1519 if (WARN_ON_ONCE(!ubq))
1520 return;
1521
1522 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))
1523 return;
1524
1525 task = io_uring_cmd_get_task(cmd);
1526 if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
1527 return;
1528
1529 ub = ubq->dev;
1530 need_schedule = ublk_abort_requests(ub, ubq);
1531
1532 io = &ubq->ios[pdu->tag];
1533 WARN_ON_ONCE(io->cmd != cmd);
1534 ublk_cancel_cmd(ubq, io, issue_flags);
1535
1536 if (need_schedule) {
1537 schedule_work(&ub->nosrv_work);
1538 }
1539 }
1540
1541 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1542 {
1543 return ubq->nr_io_ready == ubq->q_depth;
1544 }
1545
1546 static void ublk_cancel_queue(struct ublk_queue *ubq)
1547 {
1548 int i;
1549
1550 for (i = 0; i < ubq->q_depth; i++)
1551 ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
1552 }
1553
1554 /* Cancel all pending commands, must be called after del_gendisk() returns */
1555 static void ublk_cancel_dev(struct ublk_device *ub)
1556 {
1557 int i;
1558
1559 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1560 ublk_cancel_queue(ublk_get_queue(ub, i));
1561 }
1562
1563 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1564 {
1565 bool *idle = data;
1566
1567 if (blk_mq_request_started(rq)) {
1568 *idle = false;
1569 return false;
1570 }
1571 return true;
1572 }
1573
1574 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1575 {
1576 bool idle;
1577
1578 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1579 while (true) {
1580 idle = true;
1581 blk_mq_tagset_busy_iter(&ub->tag_set,
1582 ublk_check_inflight_rq, &idle);
1583 if (idle)
1584 break;
1585 msleep(UBLK_REQUEUE_DELAY_MS);
1586 }
1587 }
1588
1589 static void __ublk_quiesce_dev(struct ublk_device *ub)
1590 {
1591 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1592 __func__, ub->dev_info.dev_id,
1593 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1594 "LIVE" : "QUIESCED");
1595 blk_mq_quiesce_queue(ub->ub_disk->queue);
1596 ublk_wait_tagset_rqs_idle(ub);
1597 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1598 }
1599
1600 static void ublk_unquiesce_dev(struct ublk_device *ub)
1601 {
1602 int i;
1603
1604 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1605 __func__, ub->dev_info.dev_id,
1606 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1607 "LIVE" : "QUIESCED");
1608 /* The quiesce work has run. We let requeued rqs be aborted
1609 * before running the fallback work. "force_abort" must be seen
1610 * after the request queue is unquiesced. Then del_gendisk()
1611 * can move on.
1612 */
1613 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1614 ublk_get_queue(ub, i)->force_abort = true;
1615
1616 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1617 /* We may have requeued some rqs in ublk_quiesce_queue() */
1618 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1619 }
1620
1621 static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
1622 {
1623 struct gendisk *disk;
1624
1625 /* Sync with ublk_abort_queue() by holding the lock */
1626 spin_lock(&ub->lock);
1627 disk = ub->ub_disk;
1628 ub->dev_info.state = UBLK_S_DEV_DEAD;
1629 ub->dev_info.ublksrv_pid = -1;
1630 ub->ub_disk = NULL;
1631 spin_unlock(&ub->lock);
1632
1633 return disk;
1634 }
1635
1636 static void ublk_stop_dev(struct ublk_device *ub)
1637 {
1638 struct gendisk *disk;
1639
1640 mutex_lock(&ub->mutex);
1641 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1642 goto unlock;
1643 if (ublk_nosrv_dev_should_queue_io(ub)) {
1644 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1645 __ublk_quiesce_dev(ub);
1646 ublk_unquiesce_dev(ub);
1647 }
1648 del_gendisk(ub->ub_disk);
1649 disk = ublk_detach_disk(ub);
1650 put_disk(disk);
1651 unlock:
1652 mutex_unlock(&ub->mutex);
1653 ublk_cancel_dev(ub);
1654 }
1655
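/*
 * Handle a dead ublk server: stop the device when recovery is not enabled,
 * quiesce it and wait for a new server when UBLK_F_USER_RECOVERY is set,
 * or mark every queue as fail_io so that new requests fail immediately when
 * UBLK_F_USER_RECOVERY_FAIL_IO is set.
 */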
1656 static void ublk_nosrv_work(struct work_struct *work)
1657 {
1658 struct ublk_device *ub =
1659 container_of(work, struct ublk_device, nosrv_work);
1660 int i;
1661
1662 if (ublk_nosrv_should_stop_dev(ub)) {
1663 ublk_stop_dev(ub);
1664 return;
1665 }
1666
1667 mutex_lock(&ub->mutex);
1668 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1669 goto unlock;
1670
1671 if (ublk_nosrv_dev_should_queue_io(ub)) {
1672 __ublk_quiesce_dev(ub);
1673 } else {
1674 blk_mq_quiesce_queue(ub->ub_disk->queue);
1675 ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
1676 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1677 ublk_get_queue(ub, i)->fail_io = true;
1678 }
1679 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1680 }
1681
1682 unlock:
1683 mutex_unlock(&ub->mutex);
1684 ublk_cancel_dev(ub);
1685 }
1686
1687 /* device can only be started after all IOs are ready */
1688 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1689 {
1690 mutex_lock(&ub->mutex);
1691 ubq->nr_io_ready++;
1692 if (ublk_queue_ready(ubq)) {
1693 ubq->ubq_daemon = current;
1694 get_task_struct(ubq->ubq_daemon);
1695 ub->nr_queues_ready++;
1696
1697 if (capable(CAP_SYS_ADMIN))
1698 ub->nr_privileged_daemon++;
1699 }
1700 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1701 complete_all(&ub->completion);
1702 mutex_unlock(&ub->mutex);
1703 }
1704
1705 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1706 int tag)
1707 {
1708 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1709 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1710
1711 ublk_queue_cmd(ubq, req);
1712 }
1713
1714 static inline int ublk_check_cmd_op(u32 cmd_op)
1715 {
1716 u32 ioc_type = _IOC_TYPE(cmd_op);
1717
1718 if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1719 return -EOPNOTSUPP;
1720
1721 if (ioc_type != 'u' && ioc_type != 0)
1722 return -EOPNOTSUPP;
1723
1724 return 0;
1725 }
1726
1727 static inline void ublk_fill_io_cmd(struct ublk_io *io,
1728 struct io_uring_cmd *cmd, unsigned long buf_addr)
1729 {
1730 io->cmd = cmd;
1731 io->flags |= UBLK_IO_FLAG_ACTIVE;
1732 io->addr = buf_addr;
1733 }
1734
1735 static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
1736 unsigned int issue_flags,
1737 struct ublk_queue *ubq, unsigned int tag)
1738 {
1739 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1740
1741 /*
1742 * Safe to refer to @ubq since the ublk_queue won't die until its
1743 * commands are completed
1744 */
1745 pdu->ubq = ubq;
1746 pdu->tag = tag;
1747 io_uring_cmd_mark_cancelable(cmd, issue_flags);
1748 }
1749
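/*
 * Handle the per-io commands issued by ublksrv: FETCH_REQ registers an io
 * slot before the device is started, COMMIT_AND_FETCH_REQ commits the
 * result of the previous request and re-arms the slot, and NEED_GET_DATA
 * supplies a data buffer for an incoming WRITE request.
 */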
1750 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1751 unsigned int issue_flags,
1752 const struct ublksrv_io_cmd *ub_cmd)
1753 {
1754 struct ublk_device *ub = cmd->file->private_data;
1755 struct ublk_queue *ubq;
1756 struct ublk_io *io;
1757 u32 cmd_op = cmd->cmd_op;
1758 unsigned tag = ub_cmd->tag;
1759 int ret = -EINVAL;
1760 struct request *req;
1761
1762 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1763 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1764 ub_cmd->result);
1765
1766 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1767 goto out;
1768
1769 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1770 if (!ubq || ub_cmd->q_id != ubq->q_id)
1771 goto out;
1772
1773 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1774 goto out;
1775
1776 if (tag >= ubq->q_depth)
1777 goto out;
1778
1779 io = &ubq->ios[tag];
1780
1781 /* there is pending io cmd, something must be wrong */
1782 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1783 ret = -EBUSY;
1784 goto out;
1785 }
1786
1787 /*
1788 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1789 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
1790 */
1791 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1792 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1793 goto out;
1794
1795 ret = ublk_check_cmd_op(cmd_op);
1796 if (ret)
1797 goto out;
1798
1799 ret = -EINVAL;
1800 switch (_IOC_NR(cmd_op)) {
1801 case UBLK_IO_FETCH_REQ:
1802 /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1803 if (ublk_queue_ready(ubq)) {
1804 ret = -EBUSY;
1805 goto out;
1806 }
1807 /*
1808 * The io is being handled by the server, so COMMIT_AND_FETCH_REQ is
1809 * expected instead of FETCH_REQ
1810 */
1811 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1812 goto out;
1813
1814 if (!ublk_support_user_copy(ubq)) {
1815 /*
1816 * FETCH_REQ has to provide an IO buffer if NEED_GET_DATA
1817 * is not enabled
1818 */
1819 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1820 goto out;
1821 } else if (ub_cmd->addr) {
1822 /* User copy requires addr to be unset */
1823 ret = -EINVAL;
1824 goto out;
1825 }
1826
1827 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1828 ublk_mark_io_ready(ub, ubq);
1829 break;
1830 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1831 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1832
1833 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1834 goto out;
1835
1836 if (!ublk_support_user_copy(ubq)) {
1837 /*
1838 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if
1839 * NEED_GET_DATA is not enabled or it is a READ IO.
1840 */
1841 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1842 req_op(req) == REQ_OP_READ))
1843 goto out;
1844 } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
1845 /*
1846 * User copy requires addr to be unset when command is
1847 * not zone append
1848 */
1849 ret = -EINVAL;
1850 goto out;
1851 }
1852
1853 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1854 ublk_commit_completion(ub, ub_cmd);
1855 break;
1856 case UBLK_IO_NEED_GET_DATA:
1857 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1858 goto out;
1859 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1860 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1861 break;
1862 default:
1863 goto out;
1864 }
1865 ublk_prep_cancel(cmd, issue_flags, ubq, tag);
1866 return -EIOCBQUEUED;
1867
1868 out:
1869 io_uring_cmd_done(cmd, ret, 0, issue_flags);
1870 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1871 __func__, cmd_op, tag, ret, io->flags);
1872 return -EIOCBQUEUED;
1873 }
1874
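/*
 * Look up the request for @tag on @ubq and take a per-request reference for
 * user-copy access. Returns NULL unless the queue needs request refs, the
 * request has started, carries data, and @offset falls inside it; the caller
 * must drop the reference with ublk_put_req_ref().
 */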
1875 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1876 struct ublk_queue *ubq, int tag, size_t offset)
1877 {
1878 struct request *req;
1879
1880 if (!ublk_need_req_ref(ubq))
1881 return NULL;
1882
1883 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1884 if (!req)
1885 return NULL;
1886
1887 if (!ublk_get_req_ref(ubq, req))
1888 return NULL;
1889
1890 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1891 goto fail_put;
1892
1893 if (!ublk_rq_has_data(req))
1894 goto fail_put;
1895
1896 if (offset > blk_rq_bytes(req))
1897 goto fail_put;
1898
1899 return req;
1900 fail_put:
1901 ublk_put_req_ref(ubq, req);
1902 return NULL;
1903 }
1904
1905 static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
1906 unsigned int issue_flags)
1907 {
1908 /*
1909 * Not necessary for async retry, but let's keep it simple and always
1910 * copy the values to avoid any potential reuse.
1911 */
1912 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
1913 const struct ublksrv_io_cmd ub_cmd = {
1914 .q_id = READ_ONCE(ub_src->q_id),
1915 .tag = READ_ONCE(ub_src->tag),
1916 .result = READ_ONCE(ub_src->result),
1917 .addr = READ_ONCE(ub_src->addr)
1918 };
1919
1920 WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
1921
1922 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1923 }
1924
1925 static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
1926 unsigned int issue_flags)
1927 {
1928 ublk_ch_uring_cmd_local(cmd, issue_flags);
1929 }
1930
1931 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1932 {
1933 if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
1934 ublk_uring_cmd_cancel_fn(cmd, issue_flags);
1935 return 0;
1936 }
1937
1938 /* a well-implemented server won't run into the unlocked path */
1939 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
1940 io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
1941 return -EIOCBQUEUED;
1942 }
1943
1944 return ublk_ch_uring_cmd_local(cmd, issue_flags);
1945 }
1946
1947 static inline bool ublk_check_ubuf_dir(const struct request *req,
1948 int ubuf_dir)
1949 {
1950 /* copy ubuf to request pages */
1951 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
1952 ubuf_dir == ITER_SOURCE)
1953 return true;
1954
1955 /* copy request pages to ubuf */
1956 if ((req_op(req) == REQ_OP_WRITE ||
1957 req_op(req) == REQ_OP_ZONE_APPEND) &&
1958 ubuf_dir == ITER_DEST)
1959 return true;
1960
1961 return false;
1962 }
1963
1964 static struct request *ublk_check_and_get_req(struct kiocb *iocb,
1965 struct iov_iter *iter, size_t *off, int dir)
1966 {
1967 struct ublk_device *ub = iocb->ki_filp->private_data;
1968 struct ublk_queue *ubq;
1969 struct request *req;
1970 size_t buf_off;
1971 u16 tag, q_id;
1972
1973 if (!ub)
1974 return ERR_PTR(-EACCES);
1975
1976 if (!user_backed_iter(iter))
1977 return ERR_PTR(-EACCES);
1978
1979 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1980 return ERR_PTR(-EACCES);
1981
1982 tag = ublk_pos_to_tag(iocb->ki_pos);
1983 q_id = ublk_pos_to_hwq(iocb->ki_pos);
1984 buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
1985
1986 if (q_id >= ub->dev_info.nr_hw_queues)
1987 return ERR_PTR(-EINVAL);
1988
1989 ubq = ublk_get_queue(ub, q_id);
1990 if (!ubq)
1991 return ERR_PTR(-EINVAL);
1992
1993 if (tag >= ubq->q_depth)
1994 return ERR_PTR(-EINVAL);
1995
1996 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1997 if (!req)
1998 return ERR_PTR(-EINVAL);
1999
2000 if (!req->mq_hctx || !req->mq_hctx->driver_data)
2001 goto fail;
2002
2003 if (!ublk_check_ubuf_dir(req, dir))
2004 goto fail;
2005
2006 *off = buf_off;
2007 return req;
2008 fail:
2009 ublk_put_req_ref(ubq, req);
2010 return ERR_PTR(-EACCES);
2011 }
2012
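/*
 * read_iter/write_iter implement UBLK_F_USER_COPY: the server moves request
 * data through pread()/pwrite() on /dev/ublkcN at an offset that encodes
 * (q_id, tag, byte offset) above UBLKSRV_IO_BUF_OFFSET.
 *
 * A minimal userspace sketch, assuming the bit layout exposed by the
 * ublk_pos_to_*() helpers via UBLK_QID_OFF/UBLK_TAG_OFF shifts in the uapi
 * header (treat the exact macro names as an assumption):
 *
 *	__u64 off = UBLKSRV_IO_BUF_OFFSET +
 *		((__u64)q_id << UBLK_QID_OFF) +
 *		((__u64)tag << UBLK_TAG_OFF) + byte_off;
 *
 *	pwrite(ublkc_fd, buf, len, off);	// supply data for a READ request
 *	pread(ublkc_fd, buf, len, off);		// fetch data of a WRITE request
 */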
2013 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
2014 {
2015 struct ublk_queue *ubq;
2016 struct request *req;
2017 size_t buf_off;
2018 size_t ret;
2019
2020 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
2021 if (IS_ERR(req))
2022 return PTR_ERR(req);
2023
2024 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
2025 ubq = req->mq_hctx->driver_data;
2026 ublk_put_req_ref(ubq, req);
2027
2028 return ret;
2029 }
2030
2031 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
2032 {
2033 struct ublk_queue *ubq;
2034 struct request *req;
2035 size_t buf_off;
2036 size_t ret;
2037
2038 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
2039 if (IS_ERR(req))
2040 return PTR_ERR(req);
2041
2042 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
2043 ubq = req->mq_hctx->driver_data;
2044 ublk_put_req_ref(ubq, req);
2045
2046 return ret;
2047 }
2048
2049 static const struct file_operations ublk_ch_fops = {
2050 .owner = THIS_MODULE,
2051 .open = ublk_ch_open,
2052 .release = ublk_ch_release,
2053 .read_iter = ublk_ch_read_iter,
2054 .write_iter = ublk_ch_write_iter,
2055 .uring_cmd = ublk_ch_uring_cmd,
2056 .mmap = ublk_ch_mmap,
2057 };
2058
2059 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
2060 {
2061 int size = ublk_queue_cmd_buf_size(ub, q_id);
2062 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2063
2064 if (ubq->ubq_daemon)
2065 put_task_struct(ubq->ubq_daemon);
2066 if (ubq->io_cmd_buf)
2067 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
2068 }
2069
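/*
 * Allocate the per-queue command descriptor buffer (io_cmd_buf). It holds one
 * ublksrv_io_desc per tag (see ublk_queue_cmd_buf_size()); the server maps it
 * via mmap() on /dev/ublkcN (ublk_ch_mmap) to learn each request's operation
 * and data length before handling it.
 */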
2070 static int ublk_init_queue(struct ublk_device *ub, int q_id)
2071 {
2072 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2073 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
2074 void *ptr;
2075 int size;
2076
2077 spin_lock_init(&ubq->cancel_lock);
2078 ubq->flags = ub->dev_info.flags;
2079 ubq->q_id = q_id;
2080 ubq->q_depth = ub->dev_info.queue_depth;
2081 size = ublk_queue_cmd_buf_size(ub, q_id);
2082
2083 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
2084 if (!ptr)
2085 return -ENOMEM;
2086
2087 ubq->io_cmd_buf = ptr;
2088 ubq->dev = ub;
2089 return 0;
2090 }
2091
2092 static void ublk_deinit_queues(struct ublk_device *ub)
2093 {
2094 int nr_queues = ub->dev_info.nr_hw_queues;
2095 int i;
2096
2097 if (!ub->__queues)
2098 return;
2099
2100 for (i = 0; i < nr_queues; i++)
2101 ublk_deinit_queue(ub, i);
2102 kfree(ub->__queues);
2103 }
2104
2105 static int ublk_init_queues(struct ublk_device *ub)
2106 {
2107 int nr_queues = ub->dev_info.nr_hw_queues;
2108 int depth = ub->dev_info.queue_depth;
2109 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
2110 int i, ret = -ENOMEM;
2111
2112 ub->queue_size = ubq_size;
2113 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2114 if (!ub->__queues)
2115 return ret;
2116
2117 for (i = 0; i < nr_queues; i++) {
2118 if (ublk_init_queue(ub, i))
2119 goto fail;
2120 }
2121
2122 init_completion(&ub->completion);
2123 return 0;
2124
2125 fail:
2126 ublk_deinit_queues(ub);
2127 return ret;
2128 }
2129
2130 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2131 {
2132 int i = idx;
2133 int err;
2134
2135 spin_lock(&ublk_idr_lock);
2136 /* allocate id; if @idx >= 0, we're requesting that specific id */
2137 if (i >= 0) {
2138 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2139 if (err == -ENOSPC)
2140 err = -EEXIST;
2141 } else {
2142 err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS,
2143 GFP_NOWAIT);
2144 }
2145 spin_unlock(&ublk_idr_lock);
2146
2147 if (err >= 0)
2148 ub->ub_number = err;
2149
2150 return err;
2151 }
2152
2153 static void ublk_free_dev_number(struct ublk_device *ub)
2154 {
2155 spin_lock(&ublk_idr_lock);
2156 idr_remove(&ublk_index_idr, ub->ub_number);
2157 wake_up_all(&ublk_idr_wq);
2158 spin_unlock(&ublk_idr_lock);
2159 }
2160
2161 static void ublk_cdev_rel(struct device *dev)
2162 {
2163 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2164
2165 blk_mq_free_tag_set(&ub->tag_set);
2166 ublk_deinit_queues(ub);
2167 ublk_free_dev_number(ub);
2168 mutex_destroy(&ub->mutex);
2169 kfree(ub);
2170 }
2171
2172 static int ublk_add_chdev(struct ublk_device *ub)
2173 {
2174 struct device *dev = &ub->cdev_dev;
2175 int minor = ub->ub_number;
2176 int ret;
2177
2178 dev->parent = ublk_misc.this_device;
2179 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
2180 dev->class = &ublk_chr_class;
2181 dev->release = ublk_cdev_rel;
2182 device_initialize(dev);
2183
2184 ret = dev_set_name(dev, "ublkc%d", minor);
2185 if (ret)
2186 goto fail;
2187
2188 cdev_init(&ub->cdev, &ublk_ch_fops);
2189 ret = cdev_device_add(&ub->cdev, dev);
2190 if (ret)
2191 goto fail;
2192
2193 ublks_added++;
2194 return 0;
2195 fail:
2196 put_device(dev);
2197 return ret;
2198 }
2199
2200 /* align max io buffer size with PAGE_SIZE */
2201 static void ublk_align_max_io_size(struct ublk_device *ub)
2202 {
2203 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2204
2205 ub->dev_info.max_io_buf_bytes =
2206 round_down(max_io_bytes, PAGE_SIZE);
2207 }
2208
2209 static int ublk_add_tag_set(struct ublk_device *ub)
2210 {
2211 ub->tag_set.ops = &ublk_mq_ops;
2212 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2213 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2214 ub->tag_set.numa_node = NUMA_NO_NODE;
2215 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
2216 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2217 ub->tag_set.driver_data = ub;
2218 return blk_mq_alloc_tag_set(&ub->tag_set);
2219 }
2220
2221 static void ublk_remove(struct ublk_device *ub)
2222 {
2223 ublk_stop_dev(ub);
2224 cancel_work_sync(&ub->nosrv_work);
2225 cdev_device_del(&ub->cdev, &ub->cdev_dev);
2226 ublk_put_device(ub);
2227 ublks_added--;
2228 }
2229
2230 static struct ublk_device *ublk_get_device_from_id(int idx)
2231 {
2232 struct ublk_device *ub = NULL;
2233
2234 if (idx < 0)
2235 return NULL;
2236
2237 spin_lock(&ublk_idr_lock);
2238 ub = idr_find(&ublk_index_idr, idx);
2239 if (ub)
2240 ub = ublk_get_device(ub);
2241 spin_unlock(&ublk_idr_lock);
2242
2243 return ub;
2244 }
2245
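/*
 * START_DEV: header->data[0] carries the ublk server's pid. The handler waits
 * on ub->completion until every queue daemon has issued FETCH_REQ for all
 * tags, builds the queue limits from the previously set basic/discard/zoned
 * params, then allocates the gendisk ("ublkb%d") and adds it.
 */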
2246 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
2247 {
2248 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2249 const struct ublk_param_basic *p = &ub->params.basic;
2250 int ublksrv_pid = (int)header->data[0];
2251 struct queue_limits lim = {
2252 .logical_block_size = 1 << p->logical_bs_shift,
2253 .physical_block_size = 1 << p->physical_bs_shift,
2254 .io_min = 1 << p->io_min_shift,
2255 .io_opt = 1 << p->io_opt_shift,
2256 .max_hw_sectors = p->max_sectors,
2257 .chunk_sectors = p->chunk_sectors,
2258 .virt_boundary_mask = p->virt_boundary_mask,
2259 .max_segments = USHRT_MAX,
2260 .max_segment_size = UINT_MAX,
2261 .dma_alignment = 3,
2262 };
2263 struct gendisk *disk;
2264 int ret = -EINVAL;
2265
2266 if (ublksrv_pid <= 0)
2267 return -EINVAL;
2268 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
2269 return -EINVAL;
2270
2271 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
2272 const struct ublk_param_discard *pd = &ub->params.discard;
2273
2274 lim.discard_alignment = pd->discard_alignment;
2275 lim.discard_granularity = pd->discard_granularity;
2276 lim.max_hw_discard_sectors = pd->max_discard_sectors;
2277 lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
2278 lim.max_discard_segments = pd->max_discard_segments;
2279 }
2280
2281 if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
2282 const struct ublk_param_zoned *p = &ub->params.zoned;
2283
2284 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
2285 return -EOPNOTSUPP;
2286
2287 lim.features |= BLK_FEAT_ZONED;
2288 lim.max_active_zones = p->max_active_zones;
2289 lim.max_open_zones = p->max_open_zones;
2290 lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
2291 }
2292
2293 if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
2294 lim.features |= BLK_FEAT_WRITE_CACHE;
2295 if (ub->params.basic.attrs & UBLK_ATTR_FUA)
2296 lim.features |= BLK_FEAT_FUA;
2297 }
2298
2299 if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
2300 lim.features |= BLK_FEAT_ROTATIONAL;
2301
2302 if (wait_for_completion_interruptible(&ub->completion) != 0)
2303 return -EINTR;
2304
2305 mutex_lock(&ub->mutex);
2306 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2307 test_bit(UB_STATE_USED, &ub->state)) {
2308 ret = -EEXIST;
2309 goto out_unlock;
2310 }
2311
2312 disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
2313 if (IS_ERR(disk)) {
2314 ret = PTR_ERR(disk);
2315 goto out_unlock;
2316 }
2317 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2318 disk->fops = &ub_fops;
2319 disk->private_data = ub;
2320
2321 ub->dev_info.ublksrv_pid = ublksrv_pid;
2322 ub->ub_disk = disk;
2323
2324 ublk_apply_params(ub);
2325
2326 /* don't probe partitions if any ubq daemon is untrusted */
2327 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2328 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2329
2330 ublk_get_device(ub);
2331 ub->dev_info.state = UBLK_S_DEV_LIVE;
2332
2333 if (ublk_dev_is_zoned(ub)) {
2334 ret = ublk_revalidate_disk_zones(ub);
2335 if (ret)
2336 goto out_put_cdev;
2337 }
2338
2339 ret = add_disk(disk);
2340 if (ret)
2341 goto out_put_cdev;
2342
2343 set_bit(UB_STATE_USED, &ub->state);
2344
2345 out_put_cdev:
2346 if (ret) {
2347 ublk_detach_disk(ub);
2348 ublk_put_device(ub);
2349 }
2350 if (ret)
2351 put_disk(disk);
2352 out_unlock:
2353 mutex_unlock(&ub->mutex);
2354 return ret;
2355 }
2356
2357 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2358 struct io_uring_cmd *cmd)
2359 {
2360 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2361 void __user *argp = (void __user *)(unsigned long)header->addr;
2362 cpumask_var_t cpumask;
2363 unsigned long queue;
2364 unsigned int retlen;
2365 unsigned int i;
2366 int ret;
2367
2368 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
2369 return -EINVAL;
2370 if (header->len & (sizeof(unsigned long)-1))
2371 return -EINVAL;
2372 if (!header->addr)
2373 return -EINVAL;
2374
2375 queue = header->data[0];
2376 if (queue >= ub->dev_info.nr_hw_queues)
2377 return -EINVAL;
2378
2379 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
2380 return -ENOMEM;
2381
2382 for_each_possible_cpu(i) {
2383 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2384 cpumask_set_cpu(i, cpumask);
2385 }
2386
2387 ret = -EFAULT;
2388 retlen = min_t(unsigned short, header->len, cpumask_size());
2389 if (copy_to_user(argp, cpumask, retlen))
2390 goto out_free_cpumask;
2391 if (retlen != header->len &&
2392 clear_user(argp + retlen, header->len - retlen))
2393 goto out_free_cpumask;
2394
2395 ret = 0;
2396 out_free_cpumask:
2397 free_cpumask_var(cpumask);
2398 return ret;
2399 }
2400
2401 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
2402 {
2403 pr_devel("%s: dev id %d flags %llx\n", __func__,
2404 info->dev_id, info->flags);
2405 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2406 info->nr_hw_queues, info->queue_depth);
2407 }
2408
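/*
 * ADD_DEV: header->addr points at a struct ublksrv_ctrl_dev_info describing
 * the device to create. A rough sketch of the fields this handler consumes
 * (userspace side; values purely illustrative):
 *
 *	struct ublksrv_ctrl_dev_info info = {
 *		.dev_id           = -1,		// let the driver pick an index
 *		.nr_hw_queues     = 1,
 *		.queue_depth      = 128,
 *		.max_io_buf_bytes = 512 << 10,
 *		.flags            = UBLK_F_USER_RECOVERY,
 *	};
 *
 * The negotiated result (clamped nr_hw_queues, masked flags, assigned dev_id,
 * owner uid/gid) is copied back to the same buffer on success.
 */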
2409 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
2410 {
2411 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2412 void __user *argp = (void __user *)(unsigned long)header->addr;
2413 struct ublksrv_ctrl_dev_info info;
2414 struct ublk_device *ub;
2415 int ret = -EINVAL;
2416
2417 if (header->len < sizeof(info) || !header->addr)
2418 return -EINVAL;
2419 if (header->queue_id != (u16)-1) {
2420 pr_warn("%s: queue_id is wrong %x\n",
2421 __func__, header->queue_id);
2422 return -EINVAL;
2423 }
2424
2425 if (copy_from_user(&info, argp, sizeof(info)))
2426 return -EFAULT;
2427
2428 if (capable(CAP_SYS_ADMIN))
2429 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
2430 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
2431 return -EPERM;
2432
2433 /* forbid nonsense combinations of recovery flags */
2434 switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
2435 case 0:
2436 case UBLK_F_USER_RECOVERY:
2437 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
2438 case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
2439 break;
2440 default:
2441 pr_warn("%s: invalid recovery flags %llx\n", __func__,
2442 info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
2443 return -EINVAL;
2444 }
2445
2446 /*
2447 * An unprivileged device can't be trusted, and RECOVERY and
2448 * RECOVERY_REISSUE may still hang error handling, so recovery
2449 * features can't be supported for unprivileged ublk now
2450 *
2451 * TODO: provide forward progress for RECOVERY handler, so that
2452 * unprivileged device can benefit from it
2453 */
2454 if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
2455 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
2456 UBLK_F_USER_RECOVERY);
2457
2458 /*
2459 * For USER_COPY, we depend on userspace to fill the request
2460 * buffer via pwrite() to the ublk char device, which can't be
2461 * used for an unprivileged device
2462 */
2463 if (info.flags & UBLK_F_USER_COPY)
2464 return -EINVAL;
2465 }
2466
2467 /* the created device is always owned by current user */
2468 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
2469
2470 if (header->dev_id != info.dev_id) {
2471 pr_warn("%s: dev id not match %u %u\n",
2472 __func__, header->dev_id, info.dev_id);
2473 return -EINVAL;
2474 }
2475
2476 if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
2477 pr_warn("%s: dev id is too large. Max supported is %d\n",
2478 __func__, UBLK_MAX_UBLKS - 1);
2479 return -EINVAL;
2480 }
2481
2482 ublk_dump_dev_info(&info);
2483
2484 ret = mutex_lock_killable(&ublk_ctl_mutex);
2485 if (ret)
2486 return ret;
2487
2488 ret = -EACCES;
2489 if (ublks_added >= ublks_max)
2490 goto out_unlock;
2491
2492 ret = -ENOMEM;
2493 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2494 if (!ub)
2495 goto out_unlock;
2496 mutex_init(&ub->mutex);
2497 spin_lock_init(&ub->lock);
2498 INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
2499
2500 ret = ublk_alloc_dev_number(ub, header->dev_id);
2501 if (ret < 0)
2502 goto out_free_ub;
2503
2504 memcpy(&ub->dev_info, &info, sizeof(info));
2505
2506 /* update device id */
2507 ub->dev_info.dev_id = ub->ub_number;
2508
2509 /*
2510 * The 64-bit flags will be copied back to userspace as the feature
2511 * negotiation result, so clear the flags which the driver doesn't
2512 * support yet; userspace then gets the correct flags (features) to
2513 * handle.
2514 */
2515 ub->dev_info.flags &= UBLK_F_ALL;
2516
2517 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2518 UBLK_F_URING_CMD_COMP_IN_TASK;
2519
2520 /* GET_DATA isn't needed any more with USER_COPY */
2521 if (ublk_dev_is_user_copy(ub))
2522 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2523
2524 /* Zoned storage support requires user copy feature */
2525 if (ublk_dev_is_zoned(ub) &&
2526 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
2527 ret = -EINVAL;
2528 goto out_free_dev_number;
2529 }
2530
2531 /* We are not ready to support zero copy */
2532 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
2533
2534 ub->dev_info.nr_hw_queues = min_t(unsigned int,
2535 ub->dev_info.nr_hw_queues, nr_cpu_ids);
2536 ublk_align_max_io_size(ub);
2537
2538 ret = ublk_init_queues(ub);
2539 if (ret)
2540 goto out_free_dev_number;
2541
2542 ret = ublk_add_tag_set(ub);
2543 if (ret)
2544 goto out_deinit_queues;
2545
2546 ret = -EFAULT;
2547 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2548 goto out_free_tag_set;
2549
2550 /*
2551 * Add the char dev so that the ublksrv daemon can be set up.
2552 * ublk_add_chdev() will cleanup everything if it fails.
2553 */
2554 ret = ublk_add_chdev(ub);
2555 goto out_unlock;
2556
2557 out_free_tag_set:
2558 blk_mq_free_tag_set(&ub->tag_set);
2559 out_deinit_queues:
2560 ublk_deinit_queues(ub);
2561 out_free_dev_number:
2562 ublk_free_dev_number(ub);
2563 out_free_ub:
2564 mutex_destroy(&ub->mutex);
2565 kfree(ub);
2566 out_unlock:
2567 mutex_unlock(&ublk_ctl_mutex);
2568 return ret;
2569 }
2570
2571 static inline bool ublk_idr_freed(int id)
2572 {
2573 void *ptr;
2574
2575 spin_lock(&ublk_idr_lock);
2576 ptr = idr_find(&ublk_index_idr, id);
2577 spin_unlock(&ublk_idr_lock);
2578
2579 return ptr == NULL;
2580 }
2581
2582 static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
2583 {
2584 struct ublk_device *ub = *p_ub;
2585 int idx = ub->ub_number;
2586 int ret;
2587
2588 ret = mutex_lock_killable(&ublk_ctl_mutex);
2589 if (ret)
2590 return ret;
2591
2592 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2593 ublk_remove(ub);
2594 set_bit(UB_STATE_DELETED, &ub->state);
2595 }
2596
2597 /* Mark the reference as consumed */
2598 *p_ub = NULL;
2599 ublk_put_device(ub);
2600 mutex_unlock(&ublk_ctl_mutex);
2601
2602 /*
2603 * Wait until the idr entry is removed, so that the device number can
2604 * be reused once the DEL_DEV command returns.
2605 *
2606 * If we return early because of a user interrupt, a future delete
2607 * command may come:
2608 *
2609 * - the device number isn't freed yet: this device won't and needn't
2610 * be deleted again, since UB_STATE_DELETED is set and the device
2611 * will be released after the last reference is dropped
2612 *
2613 * - the device number is already freed: we will not find this
2614 * device via ublk_get_device_from_id()
2615 */
2616 if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
2617 return -EINTR;
2618 return 0;
2619 }
2620
2621 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
2622 {
2623 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2624
2625 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
2626 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
2627 header->data[0], header->addr, header->len);
2628 }
2629
2630 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2631 {
2632 ublk_stop_dev(ub);
2633 cancel_work_sync(&ub->nosrv_work);
2634 return 0;
2635 }
2636
2637 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2638 struct io_uring_cmd *cmd)
2639 {
2640 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2641 void __user *argp = (void __user *)(unsigned long)header->addr;
2642
2643 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
2644 return -EINVAL;
2645
2646 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2647 return -EFAULT;
2648
2649 return 0;
2650 }
2651
2652 /* TYPE_DEVT is readonly, so fill it up before returning to userspace */
2653 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2654 {
2655 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2656 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2657
2658 if (ub->ub_disk) {
2659 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2660 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2661 } else {
2662 ub->params.devt.disk_major = 0;
2663 ub->params.devt.disk_minor = 0;
2664 }
2665 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
2666 }
2667
2668 static int ublk_ctrl_get_params(struct ublk_device *ub,
2669 struct io_uring_cmd *cmd)
2670 {
2671 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2672 void __user *argp = (void __user *)(unsigned long)header->addr;
2673 struct ublk_params_header ph;
2674 int ret;
2675
2676 if (header->len <= sizeof(ph) || !header->addr)
2677 return -EINVAL;
2678
2679 if (copy_from_user(&ph, argp, sizeof(ph)))
2680 return -EFAULT;
2681
2682 if (ph.len > header->len || !ph.len)
2683 return -EINVAL;
2684
2685 if (ph.len > sizeof(struct ublk_params))
2686 ph.len = sizeof(struct ublk_params);
2687
2688 mutex_lock(&ub->mutex);
2689 ublk_ctrl_fill_params_devt(ub);
2690 if (copy_to_user(argp, &ub->params, ph.len))
2691 ret = -EFAULT;
2692 else
2693 ret = 0;
2694 mutex_unlock(&ub->mutex);
2695
2696 return ret;
2697 }
2698
2699 static int ublk_ctrl_set_params(struct ublk_device *ub,
2700 struct io_uring_cmd *cmd)
2701 {
2702 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2703 void __user *argp = (void __user *)(unsigned long)header->addr;
2704 struct ublk_params_header ph;
2705 int ret = -EFAULT;
2706
2707 if (header->len <= sizeof(ph) || !header->addr)
2708 return -EINVAL;
2709
2710 if (copy_from_user(&ph, argp, sizeof(ph)))
2711 return -EFAULT;
2712
2713 if (ph.len > header->len || !ph.len || !ph.types)
2714 return -EINVAL;
2715
2716 if (ph.len > sizeof(struct ublk_params))
2717 ph.len = sizeof(struct ublk_params);
2718
2719 /* parameters can only be changed when device isn't live */
2720 mutex_lock(&ub->mutex);
2721 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
2722 ret = -EACCES;
2723 } else if (copy_from_user(&ub->params, argp, ph.len)) {
2724 ret = -EFAULT;
2725 } else {
2726 /* clear all we don't support yet */
2727 ub->params.types &= UBLK_PARAM_TYPE_ALL;
2728 ret = ublk_validate_params(ub);
2729 if (ret)
2730 ub->params.types = 0;
2731 }
2732 mutex_unlock(&ub->mutex);
2733
2734 return ret;
2735 }
2736
2737 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2738 {
2739 int i;
2740
2741 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2742
2743 /* All old ioucmds have to be completed */
2744 ubq->nr_io_ready = 0;
2745 /* old daemon is PF_EXITING, put it now */
2746 put_task_struct(ubq->ubq_daemon);
2747 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2748 ubq->ubq_daemon = NULL;
2749 ubq->timeout = false;
2750 ubq->canceling = false;
2751
2752 for (i = 0; i < ubq->q_depth; i++) {
2753 struct ublk_io *io = &ubq->ios[i];
2754
2755 /* forget everything now and be ready for new FETCH_REQ */
2756 io->flags = 0;
2757 io->cmd = NULL;
2758 io->addr = 0;
2759 }
2760 }
2761
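/*
 * User recovery handshake: START_USER_RECOVERY re-initializes every queue
 * (dropping the dead daemon and all old ioucmds) and re-arms ub->completion;
 * the new server then opens /dev/ublkcX, mmaps the command buffers and issues
 * FETCH_REQ for every tag; END_USER_RECOVERY waits for that completion,
 * records the new ublksrv_pid and restarts the queue.
 */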
2762 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2763 struct io_uring_cmd *cmd)
2764 {
2765 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2766 int ret = -EINVAL;
2767 int i;
2768
2769 mutex_lock(&ub->mutex);
2770 if (ublk_nosrv_should_stop_dev(ub))
2771 goto out_unlock;
2772 if (!ub->nr_queues_ready)
2773 goto out_unlock;
2774 /*
2775 * START_RECOVERY is only allowed after:
2776 *
2777 * (1) UB_STATE_OPEN is not set, which means the dying process has
2778 * exited and the related io_uring ctx is freed, so the file struct of
2779 * /dev/ublkcX is released.
2780 *
2781 * and one of the following holds
2782 *
2783 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
2784 * (a) has quiesced the request queue
2785 * (b) has requeued every inflight rq whose io_flags is ACTIVE
2786 * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
2787 * (d) has completed/canceled all ioucmds owned by the dying process
2788 *
2789 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
2790 * quiesced, but all I/O is being immediately errored
2791 */
2792 if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
2793 ret = -EBUSY;
2794 goto out_unlock;
2795 }
2796 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2797 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2798 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2799 /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
2800 ub->mm = NULL;
2801 ub->nr_queues_ready = 0;
2802 ub->nr_privileged_daemon = 0;
2803 init_completion(&ub->completion);
2804 ret = 0;
2805 out_unlock:
2806 mutex_unlock(&ub->mutex);
2807 return ret;
2808 }
2809
2810 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2811 struct io_uring_cmd *cmd)
2812 {
2813 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2814 int ublksrv_pid = (int)header->data[0];
2815 int ret = -EINVAL;
2816 int i;
2817
2818 pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
2819 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2820 /* wait until the new ubq_daemon has sent FETCH_REQ for all tags */
2821 if (wait_for_completion_interruptible(&ub->completion))
2822 return -EINTR;
2823
2824 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2825 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2826
2827 mutex_lock(&ub->mutex);
2828 if (ublk_nosrv_should_stop_dev(ub))
2829 goto out_unlock;
2830
2831 if (!ublk_dev_in_recoverable_state(ub)) {
2832 ret = -EBUSY;
2833 goto out_unlock;
2834 }
2835 ub->dev_info.ublksrv_pid = ublksrv_pid;
2836 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2837 __func__, ublksrv_pid, header->dev_id);
2838
2839 if (ublk_nosrv_dev_should_queue_io(ub)) {
2840 ub->dev_info.state = UBLK_S_DEV_LIVE;
2841 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2842 pr_devel("%s: queue unquiesced, dev id %d.\n",
2843 __func__, header->dev_id);
2844 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2845 } else {
2846 blk_mq_quiesce_queue(ub->ub_disk->queue);
2847 ub->dev_info.state = UBLK_S_DEV_LIVE;
2848 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
2849 ublk_get_queue(ub, i)->fail_io = false;
2850 }
2851 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2852 }
2853
2854 ret = 0;
2855 out_unlock:
2856 mutex_unlock(&ub->mutex);
2857 return ret;
2858 }
2859
2860 static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
2861 {
2862 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2863 void __user *argp = (void __user *)(unsigned long)header->addr;
2864 u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
2865
2866 if (header->len != UBLK_FEATURES_LEN || !header->addr)
2867 return -EINVAL;
2868
2869 if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
2870 return -EFAULT;
2871
2872 return 0;
2873 }
2874
2875 /*
2876 * All control commands are sent via /dev/ublk-control, so we have to check
2877 * the destination device's permission
2878 */
2879 static int ublk_char_dev_permission(struct ublk_device *ub,
2880 const char *dev_path, int mask)
2881 {
2882 int err;
2883 struct path path;
2884 struct kstat stat;
2885
2886 err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
2887 if (err)
2888 return err;
2889
2890 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
2891 if (err)
2892 goto exit;
2893
2894 err = -EPERM;
2895 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2896 goto exit;
2897
2898 err = inode_permission(&nop_mnt_idmap,
2899 d_backing_inode(path.dentry), mask);
2900 exit:
2901 path_put(&path);
2902 return err;
2903 }
2904
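/*
 * For an unprivileged device the control command payload starts with the char
 * device path, followed by the regular argument buffer; after the permission
 * check, header->addr/len are advanced past the path. Layout (sketch):
 *
 *	| dev_path (header->dev_path_len bytes) | regular payload ... |
 *	^ header->addr before the check          ^ header->addr after it
 */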
2905 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2906 struct io_uring_cmd *cmd)
2907 {
2908 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
2909 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2910 void __user *argp = (void __user *)(unsigned long)header->addr;
2911 char *dev_path = NULL;
2912 int ret = 0;
2913 int mask;
2914
2915 if (!unprivileged) {
2916 if (!capable(CAP_SYS_ADMIN))
2917 return -EPERM;
2918 /*
2919 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
2920 * char_dev_path in its payload too, since userspace may not
2921 * know whether the specified device was created in unprivileged
2922 * mode.
2923 */
2924 if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
2925 return 0;
2926 }
2927
2928 /*
2929 * The user has to provide the char device path for unprivileged ublk.
2930 *
2931 * header->addr always points to the dev path buffer, and
2932 * header->dev_path_len records the length of the dev path buffer.
2933 */
2934 if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
2935 return -EINVAL;
2936
2937 if (header->len < header->dev_path_len)
2938 return -EINVAL;
2939
2940 dev_path = memdup_user_nul(argp, header->dev_path_len);
2941 if (IS_ERR(dev_path))
2942 return PTR_ERR(dev_path);
2943
2944 ret = -EINVAL;
2945 switch (_IOC_NR(cmd->cmd_op)) {
2946 case UBLK_CMD_GET_DEV_INFO:
2947 case UBLK_CMD_GET_DEV_INFO2:
2948 case UBLK_CMD_GET_QUEUE_AFFINITY:
2949 case UBLK_CMD_GET_PARAMS:
2950 case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
2951 mask = MAY_READ;
2952 break;
2953 case UBLK_CMD_START_DEV:
2954 case UBLK_CMD_STOP_DEV:
2955 case UBLK_CMD_ADD_DEV:
2956 case UBLK_CMD_DEL_DEV:
2957 case UBLK_CMD_SET_PARAMS:
2958 case UBLK_CMD_START_USER_RECOVERY:
2959 case UBLK_CMD_END_USER_RECOVERY:
2960 mask = MAY_READ | MAY_WRITE;
2961 break;
2962 default:
2963 goto exit;
2964 }
2965
2966 ret = ublk_char_dev_permission(ub, dev_path, mask);
2967 if (!ret) {
2968 header->len -= header->dev_path_len;
2969 header->addr += header->dev_path_len;
2970 }
2971 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
2972 __func__, ub->ub_number, cmd->cmd_op,
2973 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2974 dev_path, ret);
2975 exit:
2976 kfree(dev_path);
2977 return ret;
2978 }
2979
2980 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2981 unsigned int issue_flags)
2982 {
2983 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2984 struct ublk_device *ub = NULL;
2985 u32 cmd_op = cmd->cmd_op;
2986 int ret = -EINVAL;
2987
2988 if (issue_flags & IO_URING_F_NONBLOCK)
2989 return -EAGAIN;
2990
2991 ublk_ctrl_cmd_dump(cmd);
2992
2993 if (!(issue_flags & IO_URING_F_SQE128))
2994 goto out;
2995
2996 ret = ublk_check_cmd_op(cmd_op);
2997 if (ret)
2998 goto out;
2999
3000 if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
3001 ret = ublk_ctrl_get_features(cmd);
3002 goto out;
3003 }
3004
3005 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
3006 ret = -ENODEV;
3007 ub = ublk_get_device_from_id(header->dev_id);
3008 if (!ub)
3009 goto out;
3010
3011 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
3012 if (ret)
3013 goto put_dev;
3014 }
3015
3016 switch (_IOC_NR(cmd_op)) {
3017 case UBLK_CMD_START_DEV:
3018 ret = ublk_ctrl_start_dev(ub, cmd);
3019 break;
3020 case UBLK_CMD_STOP_DEV:
3021 ret = ublk_ctrl_stop_dev(ub);
3022 break;
3023 case UBLK_CMD_GET_DEV_INFO:
3024 case UBLK_CMD_GET_DEV_INFO2:
3025 ret = ublk_ctrl_get_dev_info(ub, cmd);
3026 break;
3027 case UBLK_CMD_ADD_DEV:
3028 ret = ublk_ctrl_add_dev(cmd);
3029 break;
3030 case UBLK_CMD_DEL_DEV:
3031 ret = ublk_ctrl_del_dev(&ub, true);
3032 break;
3033 case UBLK_CMD_DEL_DEV_ASYNC:
3034 ret = ublk_ctrl_del_dev(&ub, false);
3035 break;
3036 case UBLK_CMD_GET_QUEUE_AFFINITY:
3037 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
3038 break;
3039 case UBLK_CMD_GET_PARAMS:
3040 ret = ublk_ctrl_get_params(ub, cmd);
3041 break;
3042 case UBLK_CMD_SET_PARAMS:
3043 ret = ublk_ctrl_set_params(ub, cmd);
3044 break;
3045 case UBLK_CMD_START_USER_RECOVERY:
3046 ret = ublk_ctrl_start_recovery(ub, cmd);
3047 break;
3048 case UBLK_CMD_END_USER_RECOVERY:
3049 ret = ublk_ctrl_end_recovery(ub, cmd);
3050 break;
3051 default:
3052 ret = -EOPNOTSUPP;
3053 break;
3054 }
3055
3056 put_dev:
3057 if (ub)
3058 ublk_put_device(ub);
3059 out:
3060 io_uring_cmd_done(cmd, ret, 0, issue_flags);
3061 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
3062 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
3063 return -EIOCBQUEUED;
3064 }
3065
3066 static const struct file_operations ublk_ctl_fops = {
3067 .open = nonseekable_open,
3068 .uring_cmd = ublk_ctrl_uring_cmd,
3069 .owner = THIS_MODULE,
3070 .llseek = noop_llseek,
3071 };
3072
3073 static struct miscdevice ublk_misc = {
3074 .minor = MISC_DYNAMIC_MINOR,
3075 .name = "ublk-control",
3076 .fops = &ublk_ctl_fops,
3077 };
3078
3079 static int __init ublk_init(void)
3080 {
3081 int ret;
3082
3083 BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
3084 UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
3085
3086 init_waitqueue_head(&ublk_idr_wq);
3087
3088 ret = misc_register(&ublk_misc);
3089 if (ret)
3090 return ret;
3091
3092 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
3093 if (ret)
3094 goto unregister_mis;
3095
3096 ret = class_register(&ublk_chr_class);
3097 if (ret)
3098 goto free_chrdev_region;
3099
3100 return 0;
3101
3102 free_chrdev_region:
3103 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3104 unregister_mis:
3105 misc_deregister(&ublk_misc);
3106 return ret;
3107 }
3108
3109 static void __exit ublk_exit(void)
3110 {
3111 struct ublk_device *ub;
3112 int id;
3113
3114 idr_for_each_entry(&ublk_index_idr, ub, id)
3115 ublk_remove(ub);
3116
3117 class_unregister(&ublk_chr_class);
3118 misc_deregister(&ublk_misc);
3119
3120 idr_destroy(&ublk_index_idr);
3121 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3122 }
3123
3124 module_init(ublk_init);
3125 module_exit(ublk_exit);
3126
3127 static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
3128 {
3129 return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
3130 }
3131
3132 static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
3133 {
3134 return sysfs_emit(buf, "%u\n", ublks_max);
3135 }
3136
3137 static const struct kernel_param_ops ublk_max_ublks_ops = {
3138 .set = ublk_set_max_ublks,
3139 .get = ublk_get_max_ublks,
3140 };
3141
3142 module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
3143 MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
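/*
 * Usage sketch, assuming the driver is built as the ublk_drv module:
 *
 *	modprobe ublk_drv ublks_max=128
 *	echo 128 > /sys/module/ublk_drv/parameters/ublks_max
 */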
3144
3145 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
3146 MODULE_DESCRIPTION("Userspace block device");
3147 MODULE_LICENSE("GPL");
3148