xref: /linux/drivers/block/ublk_drv.c (revision 1cea5180f2f812c444ceebdc40f5d001bedd030d)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Userspace block device - a block device whose IO is handled in userspace
4  *
5  * Makes full use of io_uring passthrough commands for communicating with
6  * the ublk userspace daemon (ublksrvd) for handling basic IO requests.
7  *
8  * Copyright 2022 Ming Lei <ming.lei@redhat.com>
9  *
10  * (part of code stolen from loop.c)
11  */
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring/cmd.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
42 #include <linux/mm.h>
43 #include <asm/page.h>
44 #include <linux/task_work.h>
45 #include <linux/namei.h>
46 #include <linux/kref.h>
47 #include <uapi/linux/ublk_cmd.h>
48 
49 #define UBLK_MINORS		(1U << MINORBITS)
50 
51 /* private ioctl command mirror */
52 #define UBLK_CMD_DEL_DEV_ASYNC	_IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
53 #define UBLK_CMD_UPDATE_SIZE	_IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
54 #define UBLK_CMD_QUIESCE_DEV	_IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
55 
56 #define UBLK_IO_REGISTER_IO_BUF		_IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
57 #define UBLK_IO_UNREGISTER_IO_BUF	_IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
58 
59 /* All UBLK_F_* flags have to be included in UBLK_F_ALL */
60 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
61 		| UBLK_F_URING_CMD_COMP_IN_TASK \
62 		| UBLK_F_NEED_GET_DATA \
63 		| UBLK_F_USER_RECOVERY \
64 		| UBLK_F_USER_RECOVERY_REISSUE \
65 		| UBLK_F_UNPRIVILEGED_DEV \
66 		| UBLK_F_CMD_IOCTL_ENCODE \
67 		| UBLK_F_USER_COPY \
68 		| UBLK_F_ZONED \
69 		| UBLK_F_USER_RECOVERY_FAIL_IO \
70 		| UBLK_F_UPDATE_SIZE \
71 		| UBLK_F_AUTO_BUF_REG \
72 		| UBLK_F_QUIESCE \
73 		| UBLK_F_PER_IO_DAEMON \
74 		| UBLK_F_BUF_REG_OFF_DAEMON)
75 
76 #define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
77 		| UBLK_F_USER_RECOVERY_REISSUE \
78 		| UBLK_F_USER_RECOVERY_FAIL_IO)
79 
80 /* All UBLK_PARAM_TYPE_* should be included here */
81 #define UBLK_PARAM_TYPE_ALL                                \
82 	(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
83 	 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED |    \
84 	 UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
85 
86 struct ublk_uring_cmd_pdu {
87 	/*
88 	 * Temporarily store the requests of the same batch for queuing
89 	 * them to the daemon context.
90 	 *
91 	 * They could have been stored in the request payload, but we want
92 	 * to avoid the extra pre-allocation, and the uring_cmd payload is
93 	 * always free for us.
94 	 */
95 	union {
96 		struct request *req;
97 		struct request *req_list;
98 	};
99 
100 	/*
101 	 * The following two fields are valid for the whole lifetime of this
102 	 * cmd, and are set up in the ublk uring_cmd handler.
103 	 */
104 	struct ublk_queue *ubq;
105 
106 	struct ublk_auto_buf_reg buf;
107 
108 	u16 tag;
109 };
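/*
 * Note (annotation, not from the original comments): this pdu lives in the
 * io_uring_cmd's inline pdu area (see io_uring_cmd_to_pdu() below), which is
 * a small fixed-size buffer (32 bytes at the time of writing), so the struct
 * has to stay within that limit.
 */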
110 
111 /*
112  * io command is active: the sqe cmd has been received, and its cqe isn't done yet
113  *
114  * If the flag is set, the io command is owned by the ublk driver and is
115  * waiting for an incoming blk-mq request from the ublk block device.
116  *
117  * If the flag is cleared, the io command has been completed and is owned
118  * by the ublk server.
119  */
120 #define UBLK_IO_FLAG_ACTIVE	0x01
121 
122 /*
123  * IO command has been completed via cqe, is being handled by ublksrv, and
124  * is not committed yet
125  *
126  * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
127  * for cross verification
128  */
129 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
130 
131 /*
132  * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires
133  * getting the data buffer address from ublksrv.
134  *
135  * Then bio data can be copied into this data buffer for a WRITE request
136  * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is cleared.
137  */
138 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
139 
140 /*
141  * The request buffer is registered automatically, so we have to unregister
142  * it before completing this request.
143  *
144  * io_uring will unregister the buffer automatically for us on exit.
145  */
146 #define UBLK_IO_FLAG_AUTO_BUF_REG 	0x10
147 
148 /* atomic RW with ubq->cancel_lock */
149 #define UBLK_IO_FLAG_CANCELED	0x80000000
150 
151 /*
152  * Initialize refcount to a large number to include any registered buffers.
153  * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for
154  * any buffers registered on the io daemon task.
155  */
156 #define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
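/*
 * Illustration (annotation, assuming the reference accounting described in
 * the comments around struct ublk_io below): with user copy or zero copy
 * enabled, a request that has one buffer registered on the daemon task
 * roughly goes through:
 *
 *   dispatch to server:                ref = UBLK_REFCOUNT_INIT
 *   server registers one buffer:       task_registered_buffers = 1
 *   UBLK_IO_COMMIT_AND_FETCH_REQ:      ref -= UBLK_REFCOUNT_INIT - 1
 *   buffer unregistered:               the release callback (ublk_io_release(),
 *                                      defined later in this file) drops the
 *                                      remaining reference, completing the IO
 *
 * See ublk_init_req_ref() and ublk_sub_req_ref() below.
 */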
157 
158 struct ublk_io {
159 	/* userspace buffer address from io cmd */
160 	__u64	addr;
161 	unsigned int flags;
162 	int res;
163 
164 	union {
165 		/* valid if UBLK_IO_FLAG_ACTIVE is set */
166 		struct io_uring_cmd *cmd;
167 		/* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
168 		struct request *req;
169 	};
170 
171 	struct task_struct *task;
172 
173 	/*
174 	 * The number of uses of this I/O by the ublk server
175 	 * if user copy or zero copy are enabled:
176 	 * - UBLK_REFCOUNT_INIT from dispatch to the server
177 	 *   until UBLK_IO_COMMIT_AND_FETCH_REQ
178 	 * - 1 for each inflight ublk_ch_{read,write}_iter() call
179 	 * - 1 for each io_uring registered buffer not registered on task
180 	 * The I/O can only be completed once all references are dropped.
181 	 * User copy and buffer registration operations are only permitted
182 	 * if the reference count is nonzero.
183 	 */
184 	refcount_t ref;
185 	/* Count of buffers registered on task and not yet unregistered */
186 	unsigned task_registered_buffers;
187 
188 	/* auto-registered buffer, valid if UBLK_IO_FLAG_AUTO_BUF_REG is set */
189 	u16 buf_index;
190 	void *buf_ctx_handle;
191 } ____cacheline_aligned_in_smp;
192 
193 struct ublk_queue {
194 	int q_id;
195 	int q_depth;
196 
197 	unsigned long flags;
198 	struct ublksrv_io_desc *io_cmd_buf;
199 
200 	bool force_abort;
201 	bool canceling;
202 	bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
203 	unsigned short nr_io_ready;	/* how many ios are set up */
204 	spinlock_t		cancel_lock;
205 	struct ublk_device *dev;
206 	struct ublk_io ios[];
207 };
208 
209 struct ublk_device {
210 	struct gendisk		*ub_disk;
211 
212 	char	*__queues;
213 
214 	unsigned int	queue_size;
215 	struct ublksrv_ctrl_dev_info	dev_info;
216 
217 	struct blk_mq_tag_set	tag_set;
218 
219 	struct cdev		cdev;
220 	struct device		cdev_dev;
221 
222 #define UB_STATE_OPEN		0
223 #define UB_STATE_USED		1
224 #define UB_STATE_DELETED	2
225 	unsigned long		state;
226 	int			ub_number;
227 
228 	struct mutex		mutex;
229 
230 	spinlock_t		lock;
231 	struct mm_struct	*mm;
232 
233 	struct ublk_params	params;
234 
235 	struct completion	completion;
236 	unsigned int		nr_queues_ready;
237 	unsigned int		nr_privileged_daemon;
238 	struct mutex cancel_mutex;
239 	bool canceling;
240 };
241 
242 /* header of ublk_params */
243 struct ublk_params_header {
244 	__u32	len;
245 	__u32	types;
246 };
247 
248 static void ublk_io_release(void *priv);
249 static void ublk_stop_dev_unlocked(struct ublk_device *ub);
250 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
251 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
252 		const struct ublk_queue *ubq, struct ublk_io *io,
253 		size_t offset);
254 static inline unsigned int ublk_req_build_flags(struct request *req);
255 
256 static inline struct ublksrv_io_desc *
257 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
258 {
259 	return &ubq->io_cmd_buf[tag];
260 }
261 
262 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
263 {
264 	return ub->dev_info.flags & UBLK_F_ZONED;
265 }
266 
267 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
268 {
269 	return ubq->flags & UBLK_F_ZONED;
270 }
271 
272 #ifdef CONFIG_BLK_DEV_ZONED
273 
274 struct ublk_zoned_report_desc {
275 	__u64 sector;
276 	__u32 operation;
277 	__u32 nr_zones;
278 };
279 
280 static DEFINE_XARRAY(ublk_zoned_report_descs);
281 
282 static int ublk_zoned_insert_report_desc(const struct request *req,
283 		struct ublk_zoned_report_desc *desc)
284 {
285 	return xa_insert(&ublk_zoned_report_descs, (unsigned long)req,
286 			    desc, GFP_KERNEL);
287 }
288 
289 static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc(
290 		const struct request *req)
291 {
292 	return xa_erase(&ublk_zoned_report_descs, (unsigned long)req);
293 }
294 
295 static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc(
296 		const struct request *req)
297 {
298 	return xa_load(&ublk_zoned_report_descs, (unsigned long)req);
299 }
300 
301 static int ublk_get_nr_zones(const struct ublk_device *ub)
302 {
303 	const struct ublk_param_basic *p = &ub->params.basic;
304 
305 	/* Zone size is a power of 2 */
306 	return p->dev_sectors >> ilog2(p->chunk_sectors);
307 }
308 
309 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
310 {
311 	return blk_revalidate_disk_zones(ub->ub_disk);
312 }
313 
314 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
315 {
316 	const struct ublk_param_zoned *p = &ub->params.zoned;
317 	int nr_zones;
318 
319 	if (!ublk_dev_is_zoned(ub))
320 		return -EINVAL;
321 
322 	if (!p->max_zone_append_sectors)
323 		return -EINVAL;
324 
325 	nr_zones = ublk_get_nr_zones(ub);
326 
327 	if (p->max_active_zones > nr_zones)
328 		return -EINVAL;
329 
330 	if (p->max_open_zones > nr_zones)
331 		return -EINVAL;
332 
333 	return 0;
334 }
335 
336 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
337 {
338 	ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
339 }
340 
341 /* Based on virtblk_alloc_report_buffer */
342 static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
343 				      unsigned int nr_zones, size_t *buflen)
344 {
345 	struct request_queue *q = ublk->ub_disk->queue;
346 	size_t bufsize;
347 	void *buf;
348 
349 	nr_zones = min_t(unsigned int, nr_zones,
350 			 ublk->ub_disk->nr_zones);
351 
352 	bufsize = nr_zones * sizeof(struct blk_zone);
353 	bufsize =
354 		min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
355 
356 	while (bufsize >= sizeof(struct blk_zone)) {
357 		buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
358 		if (buf) {
359 			*buflen = bufsize;
360 			return buf;
361 		}
362 		bufsize >>= 1;
363 	}
364 
365 	*buflen = 0;
366 	return NULL;
367 }
368 
369 static int ublk_report_zones(struct gendisk *disk, sector_t sector,
370 		      unsigned int nr_zones, report_zones_cb cb, void *data)
371 {
372 	struct ublk_device *ub = disk->private_data;
373 	unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
374 	unsigned int first_zone = sector >> ilog2(zone_size_sectors);
375 	unsigned int done_zones = 0;
376 	unsigned int max_zones_per_request;
377 	int ret;
378 	struct blk_zone *buffer;
379 	size_t buffer_length;
380 
381 	nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
382 			 nr_zones);
383 
384 	buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
385 	if (!buffer)
386 		return -ENOMEM;
387 
388 	max_zones_per_request = buffer_length / sizeof(struct blk_zone);
389 
390 	while (done_zones < nr_zones) {
391 		unsigned int remaining_zones = nr_zones - done_zones;
392 		unsigned int zones_in_request =
393 			min_t(unsigned int, remaining_zones, max_zones_per_request);
394 		struct request *req;
395 		struct ublk_zoned_report_desc desc;
396 		blk_status_t status;
397 
398 		memset(buffer, 0, buffer_length);
399 
400 		req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
401 		if (IS_ERR(req)) {
402 			ret = PTR_ERR(req);
403 			goto out;
404 		}
405 
406 		desc.operation = UBLK_IO_OP_REPORT_ZONES;
407 		desc.sector = sector;
408 		desc.nr_zones = zones_in_request;
409 		ret = ublk_zoned_insert_report_desc(req, &desc);
410 		if (ret)
411 			goto free_req;
412 
413 		ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
414 		if (ret)
415 			goto erase_desc;
416 
417 		status = blk_execute_rq(req, 0);
418 		ret = blk_status_to_errno(status);
419 erase_desc:
420 		ublk_zoned_erase_report_desc(req);
421 free_req:
422 		blk_mq_free_request(req);
423 		if (ret)
424 			goto out;
425 
426 		for (unsigned int i = 0; i < zones_in_request; i++) {
427 			struct blk_zone *zone = buffer + i;
428 
429 			/* A zero length zone means no more zones in this response */
430 			if (!zone->len)
431 				break;
432 
433 			ret = cb(zone, i, data);
434 			if (ret)
435 				goto out;
436 
437 			done_zones++;
438 			sector += zone_size_sectors;
439 
440 		}
441 	}
442 
443 	ret = done_zones;
444 
445 out:
446 	kvfree(buffer);
447 	return ret;
448 }
449 
450 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
451 					 struct request *req)
452 {
453 	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
454 	struct ublk_io *io = &ubq->ios[req->tag];
455 	struct ublk_zoned_report_desc *desc;
456 	u32 ublk_op;
457 
458 	switch (req_op(req)) {
459 	case REQ_OP_ZONE_OPEN:
460 		ublk_op = UBLK_IO_OP_ZONE_OPEN;
461 		break;
462 	case REQ_OP_ZONE_CLOSE:
463 		ublk_op = UBLK_IO_OP_ZONE_CLOSE;
464 		break;
465 	case REQ_OP_ZONE_FINISH:
466 		ublk_op = UBLK_IO_OP_ZONE_FINISH;
467 		break;
468 	case REQ_OP_ZONE_RESET:
469 		ublk_op = UBLK_IO_OP_ZONE_RESET;
470 		break;
471 	case REQ_OP_ZONE_APPEND:
472 		ublk_op = UBLK_IO_OP_ZONE_APPEND;
473 		break;
474 	case REQ_OP_ZONE_RESET_ALL:
475 		ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
476 		break;
477 	case REQ_OP_DRV_IN:
478 		desc = ublk_zoned_get_report_desc(req);
479 		if (!desc)
480 			return BLK_STS_IOERR;
481 		ublk_op = desc->operation;
482 		switch (ublk_op) {
483 		case UBLK_IO_OP_REPORT_ZONES:
484 			iod->op_flags = ublk_op | ublk_req_build_flags(req);
485 			iod->nr_zones = desc->nr_zones;
486 			iod->start_sector = desc->sector;
487 			return BLK_STS_OK;
488 		default:
489 			return BLK_STS_IOERR;
490 		}
491 	case REQ_OP_DRV_OUT:
492 		/* We do not support drv_out */
493 		return BLK_STS_NOTSUPP;
494 	default:
495 		return BLK_STS_IOERR;
496 	}
497 
498 	iod->op_flags = ublk_op | ublk_req_build_flags(req);
499 	iod->nr_sectors = blk_rq_sectors(req);
500 	iod->start_sector = blk_rq_pos(req);
501 	iod->addr = io->addr;
502 
503 	return BLK_STS_OK;
504 }
505 
506 #else
507 
508 #define ublk_report_zones (NULL)
509 
510 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
511 {
512 	return -EOPNOTSUPP;
513 }
514 
515 static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
516 {
517 }
518 
519 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
520 {
521 	return 0;
522 }
523 
524 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
525 					 struct request *req)
526 {
527 	return BLK_STS_NOTSUPP;
528 }
529 
530 #endif
531 
532 static inline void __ublk_complete_rq(struct request *req);
533 
534 static dev_t ublk_chr_devt;
535 static const struct class ublk_chr_class = {
536 	.name = "ublk-char",
537 };
538 
539 static DEFINE_IDR(ublk_index_idr);
540 static DEFINE_SPINLOCK(ublk_idr_lock);
541 static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */
542 
543 static DEFINE_MUTEX(ublk_ctl_mutex);
544 
545 
546 #define UBLK_MAX_UBLKS UBLK_MINORS
547 
548 /*
549  * Max number of unprivileged ublk devices allowed to be added
550  *
551  * This can be extended to a per-user limit in the future, or even be
552  * controlled by cgroup.
553  */
554 static unsigned int unprivileged_ublks_max = 64;
555 static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
556 
557 static struct miscdevice ublk_misc;
558 
559 static inline unsigned ublk_pos_to_hwq(loff_t pos)
560 {
561 	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
562 		UBLK_QID_BITS_MASK;
563 }
564 
565 static inline unsigned ublk_pos_to_buf_off(loff_t pos)
566 {
567 	return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
568 }
569 
570 static inline unsigned ublk_pos_to_tag(loff_t pos)
571 {
572 	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
573 		UBLK_TAG_BITS_MASK;
574 }
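/*
 * Illustration (annotation, assuming the offset layout from
 * uapi/linux/ublk_cmd.h; the variable names are made up): for user copy, the
 * ublk server addresses a request's data through the char device file
 * offset, which encodes the queue id, tag and in-request byte offset:
 *
 *   off = UBLKSRV_IO_BUF_OFFSET +
 *         ((__u64)q_id << UBLK_QID_OFF) +
 *         ((__u64)tag << UBLK_TAG_OFF) +
 *         byte_offset_in_request;
 *
 * and passes it to pread()/pwrite() on the ublk char device. The helpers
 * above decode exactly these fields.
 */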
575 
576 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
577 {
578 	const struct ublk_param_basic *p = &ub->params.basic;
579 
580 	if (p->attrs & UBLK_ATTR_READ_ONLY)
581 		set_disk_ro(ub->ub_disk, true);
582 
583 	set_capacity(ub->ub_disk, p->dev_sectors);
584 }
585 
586 static int ublk_validate_params(const struct ublk_device *ub)
587 {
588 	/* basic param is the only one which must be set */
589 	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
590 		const struct ublk_param_basic *p = &ub->params.basic;
591 
592 		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
593 			return -EINVAL;
594 
595 		if (p->logical_bs_shift > p->physical_bs_shift)
596 			return -EINVAL;
597 
598 		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
599 			return -EINVAL;
600 
601 		if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
602 			return -EINVAL;
603 	} else
604 		return -EINVAL;
605 
606 	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
607 		const struct ublk_param_discard *p = &ub->params.discard;
608 
609 		/* So far, only single-segment discard is supported */
610 		if (p->max_discard_sectors && p->max_discard_segments != 1)
611 			return -EINVAL;
612 
613 		if (!p->discard_granularity)
614 			return -EINVAL;
615 	}
616 
617 	/* dev_t is read-only */
618 	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
619 		return -EINVAL;
620 
621 	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
622 		return ublk_dev_param_zoned_validate(ub);
623 	else if (ublk_dev_is_zoned(ub))
624 		return -EINVAL;
625 
626 	if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
627 		const struct ublk_param_dma_align *p = &ub->params.dma;
628 
629 		if (p->alignment >= PAGE_SIZE)
630 			return -EINVAL;
631 
632 		if (!is_power_of_2(p->alignment + 1))
633 			return -EINVAL;
634 	}
635 
636 	if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
637 		const struct ublk_param_segment *p = &ub->params.seg;
638 
639 		if (!is_power_of_2(p->seg_boundary_mask + 1))
640 			return -EINVAL;
641 
642 		if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
643 			return -EINVAL;
644 		if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
645 			return -EINVAL;
646 	}
647 
648 	return 0;
649 }
650 
651 static void ublk_apply_params(struct ublk_device *ub)
652 {
653 	ublk_dev_param_basic_apply(ub);
654 
655 	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
656 		ublk_dev_param_zoned_apply(ub);
657 }
658 
659 static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
660 {
661 	return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
662 }
663 
664 static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
665 {
666 	return ubq->flags & UBLK_F_AUTO_BUF_REG;
667 }
668 
669 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
670 {
671 	return ubq->flags & UBLK_F_USER_COPY;
672 }
673 
674 static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
675 {
676 	return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
677 		!ublk_support_auto_buf_reg(ubq);
678 }
679 
680 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
681 {
682 	/*
683 	 * read()/write() is involved in user copy, so a request reference
684 	 * has to be grabbed
685 	 *
686 	 * For zero copy, the request buffer needs to be registered in the
687 	 * io_uring buffer table, so a reference is needed
688 	 *
689 	 * For auto buffer registration, the ublk server may still issue
690 	 * UBLK_IO_COMMIT_AND_FETCH_REQ before a registered buffer is used up,
691 	 * so a reference is required too.
692 	 */
693 	return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
694 		ublk_support_auto_buf_reg(ubq);
695 }
696 
697 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
698 		struct ublk_io *io)
699 {
700 	if (ublk_need_req_ref(ubq))
701 		refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
702 }
703 
704 static inline bool ublk_get_req_ref(struct ublk_io *io)
705 {
706 	return refcount_inc_not_zero(&io->ref);
707 }
708 
709 static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
710 {
711 	if (refcount_dec_and_test(&io->ref))
712 		__ublk_complete_rq(req);
713 }
714 
715 static inline void ublk_sub_req_ref(struct ublk_io *io, struct request *req)
716 {
717 	unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
718 
719 	io->task_registered_buffers = 0;
720 	if (refcount_sub_and_test(sub_refs, &io->ref))
721 		__ublk_complete_rq(req);
722 }
723 
724 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
725 {
726 	return ubq->flags & UBLK_F_NEED_GET_DATA;
727 }
728 
729 /* Called in slow path only, keep it noinline for tracing purposes */
730 static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
731 {
732 	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
733 		return ub;
734 	return NULL;
735 }
736 
737 /* Called in slow path only, keep it noinline for tracing purposes */
738 static noinline void ublk_put_device(struct ublk_device *ub)
739 {
740 	put_device(&ub->cdev_dev);
741 }
742 
743 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
744 		int qid)
745 {
746 	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
747 }
748 
749 static inline bool ublk_rq_has_data(const struct request *rq)
750 {
751 	return bio_has_data(rq->bio);
752 }
753 
754 static inline struct ublksrv_io_desc *
755 ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
756 {
757 	return ublk_get_queue(ub, q_id)->io_cmd_buf;
758 }
759 
760 static inline int __ublk_queue_cmd_buf_size(int depth)
761 {
762 	return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
763 }
764 
765 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
766 {
767 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
768 
769 	return __ublk_queue_cmd_buf_size(ubq->q_depth);
770 }
771 
772 static int ublk_max_cmd_buf_size(void)
773 {
774 	return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
775 }
776 
777 /*
778  * Should I/O outstanding to the ublk server when it exits be reissued?
779  * If not, outstanding I/O will get errors.
780  */
781 static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
782 {
783 	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
784 	       (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
785 }
786 
787 /*
788  * Should I/O issued while there is no ublk server be queued? If not, I/O
789  * issued while there is no ublk server will get errors.
790  */
791 static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
792 {
793 	return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
794 	       !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
795 }
796 
797 /*
798  * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
799  * of the device flags for smaller cache footprint - better for fast
800  * paths.
801  */
802 static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
803 {
804 	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
805 	       !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
806 }
807 
808 /*
809  * Should ublk devices be stopped (i.e. no recovery possible) when the
810  * ublk server exits? If not, devices can be used again by a future
811  * incarnation of a ublk server via the start_recovery/end_recovery
812  * commands.
813  */
814 static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
815 {
816 	return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
817 }
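/*
 * Illustration (summary of the recovery helpers above, assuming the flag
 * semantics documented in Documentation/block/ublk.rst): when the ublk
 * server goes away,
 *
 *   no UBLK_F_USER_RECOVERY:            the device is stopped
 *   UBLK_F_USER_RECOVERY:               new I/O is queued until a new server
 *                                       takes over; outstanding I/O fails
 *   ... | UBLK_F_USER_RECOVERY_REISSUE: outstanding I/O is requeued and
 *                                       reissued to the new server as well
 *   ... | UBLK_F_USER_RECOVERY_FAIL_IO: new I/O fails instead of being
 *                                       queued, but the device stays
 *                                       recoverable
 */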
818 
819 static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
820 {
821 	return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
822 	       ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
823 }
824 
825 static void ublk_free_disk(struct gendisk *disk)
826 {
827 	struct ublk_device *ub = disk->private_data;
828 
829 	clear_bit(UB_STATE_USED, &ub->state);
830 	ublk_put_device(ub);
831 }
832 
833 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
834 		unsigned int *owner_gid)
835 {
836 	kuid_t uid;
837 	kgid_t gid;
838 
839 	current_uid_gid(&uid, &gid);
840 
841 	*owner_uid = from_kuid(&init_user_ns, uid);
842 	*owner_gid = from_kgid(&init_user_ns, gid);
843 }
844 
845 static int ublk_open(struct gendisk *disk, blk_mode_t mode)
846 {
847 	struct ublk_device *ub = disk->private_data;
848 
849 	if (capable(CAP_SYS_ADMIN))
850 		return 0;
851 
852 	/*
853 	 * If it is an unprivileged device, only the owner can open
854 	 * the disk. Otherwise it could be a trap set up by a
855 	 * malicious user who deliberately grants this disk's
856 	 * privileges to other users.
857 	 *
858 	 * This restriction is also reasonable given that anyone can create
859 	 * an unprivileged device without needing anyone else's grant.
860 	 */
861 	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
862 		unsigned int curr_uid, curr_gid;
863 
864 		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
865 
866 		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
867 				ub->dev_info.owner_gid)
868 			return -EPERM;
869 	}
870 
871 	return 0;
872 }
873 
874 static const struct block_device_operations ub_fops = {
875 	.owner =	THIS_MODULE,
876 	.open =		ublk_open,
877 	.free_disk =	ublk_free_disk,
878 	.report_zones =	ublk_report_zones,
879 };
880 
881 #define UBLK_MAX_PIN_PAGES	32
882 
883 struct ublk_io_iter {
884 	struct page *pages[UBLK_MAX_PIN_PAGES];
885 	struct bio *bio;
886 	struct bvec_iter iter;
887 };
888 
889 /* copy data between the pinned pages and the request's bio vecs */
890 static void ublk_copy_io_pages(struct ublk_io_iter *data,
891 		size_t total, size_t pg_off, int dir)
892 {
893 	unsigned done = 0;
894 	unsigned pg_idx = 0;
895 
896 	while (done < total) {
897 		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
898 		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
899 				(unsigned)(PAGE_SIZE - pg_off));
900 		void *bv_buf = bvec_kmap_local(&bv);
901 		void *pg_buf = kmap_local_page(data->pages[pg_idx]);
902 
903 		if (dir == ITER_DEST)
904 			memcpy(pg_buf + pg_off, bv_buf, bytes);
905 		else
906 			memcpy(bv_buf, pg_buf + pg_off, bytes);
907 
908 		kunmap_local(pg_buf);
909 		kunmap_local(bv_buf);
910 
911 		/* advance page array */
912 		pg_off += bytes;
913 		if (pg_off == PAGE_SIZE) {
914 			pg_idx += 1;
915 			pg_off = 0;
916 		}
917 
918 		done += bytes;
919 
920 		/* advance bio */
921 		bio_advance_iter_single(data->bio, &data->iter, bytes);
922 		if (!data->iter.bi_size) {
923 			data->bio = data->bio->bi_next;
924 			if (data->bio == NULL)
925 				break;
926 			data->iter = data->bio->bi_iter;
927 		}
928 	}
929 }
930 
931 static bool ublk_advance_io_iter(const struct request *req,
932 		struct ublk_io_iter *iter, unsigned int offset)
933 {
934 	struct bio *bio = req->bio;
935 
936 	for_each_bio(bio) {
937 		if (bio->bi_iter.bi_size > offset) {
938 			iter->bio = bio;
939 			iter->iter = bio->bi_iter;
940 			bio_advance_iter(iter->bio, &iter->iter, offset);
941 			return true;
942 		}
943 		offset -= bio->bi_iter.bi_size;
944 	}
945 	return false;
946 }
947 
948 /*
949  * Copy data between the request pages and the iov_iter; 'offset' is the
950  * starting linear offset within the request.
951  */
952 static size_t ublk_copy_user_pages(const struct request *req,
953 		unsigned offset, struct iov_iter *uiter, int dir)
954 {
955 	struct ublk_io_iter iter;
956 	size_t done = 0;
957 
958 	if (!ublk_advance_io_iter(req, &iter, offset))
959 		return 0;
960 
961 	while (iov_iter_count(uiter) && iter.bio) {
962 		unsigned nr_pages;
963 		ssize_t len;
964 		size_t off;
965 		int i;
966 
967 		len = iov_iter_get_pages2(uiter, iter.pages,
968 				iov_iter_count(uiter),
969 				UBLK_MAX_PIN_PAGES, &off);
970 		if (len <= 0)
971 			return done;
972 
973 		ublk_copy_io_pages(&iter, len, off, dir);
974 		nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
975 		for (i = 0; i < nr_pages; i++) {
976 			if (dir == ITER_DEST)
977 				set_page_dirty(iter.pages[i]);
978 			put_page(iter.pages[i]);
979 		}
980 		done += len;
981 	}
982 
983 	return done;
984 }
985 
986 static inline bool ublk_need_map_req(const struct request *req)
987 {
988 	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
989 }
990 
991 static inline bool ublk_need_unmap_req(const struct request *req)
992 {
993 	return ublk_rq_has_data(req) &&
994 	       (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
995 }
996 
997 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
998 		struct ublk_io *io)
999 {
1000 	const unsigned int rq_bytes = blk_rq_bytes(req);
1001 
1002 	if (!ublk_need_map_io(ubq))
1003 		return rq_bytes;
1004 
1005 	/*
1006 	 * Without zero copy, we delay copying WRITE request data into the
1007 	 * ublksrv context; the big benefit is that pinning pages in the
1008 	 * current context is pretty fast, see ublk_copy_user_pages().
1009 	 */
1010 	if (ublk_need_map_req(req)) {
1011 		struct iov_iter iter;
1012 		const int dir = ITER_DEST;
1013 
1014 		import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
1015 		return ublk_copy_user_pages(req, 0, &iter, dir);
1016 	}
1017 	return rq_bytes;
1018 }
1019 
1020 static int ublk_unmap_io(const struct ublk_queue *ubq,
1021 		const struct request *req,
1022 		struct ublk_io *io)
1023 {
1024 	const unsigned int rq_bytes = blk_rq_bytes(req);
1025 
1026 	if (!ublk_need_map_io(ubq))
1027 		return rq_bytes;
1028 
1029 	if (ublk_need_unmap_req(req)) {
1030 		struct iov_iter iter;
1031 		const int dir = ITER_SOURCE;
1032 
1033 		WARN_ON_ONCE(io->res > rq_bytes);
1034 
1035 		import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
1036 		return ublk_copy_user_pages(req, 0, &iter, dir);
1037 	}
1038 	return rq_bytes;
1039 }
1040 
1041 static inline unsigned int ublk_req_build_flags(struct request *req)
1042 {
1043 	unsigned flags = 0;
1044 
1045 	if (req->cmd_flags & REQ_FAILFAST_DEV)
1046 		flags |= UBLK_IO_F_FAILFAST_DEV;
1047 
1048 	if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
1049 		flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
1050 
1051 	if (req->cmd_flags & REQ_FAILFAST_DRIVER)
1052 		flags |= UBLK_IO_F_FAILFAST_DRIVER;
1053 
1054 	if (req->cmd_flags & REQ_META)
1055 		flags |= UBLK_IO_F_META;
1056 
1057 	if (req->cmd_flags & REQ_FUA)
1058 		flags |= UBLK_IO_F_FUA;
1059 
1060 	if (req->cmd_flags & REQ_NOUNMAP)
1061 		flags |= UBLK_IO_F_NOUNMAP;
1062 
1063 	if (req->cmd_flags & REQ_SWAP)
1064 		flags |= UBLK_IO_F_SWAP;
1065 
1066 	return flags;
1067 }
1068 
1069 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
1070 {
1071 	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
1072 	struct ublk_io *io = &ubq->ios[req->tag];
1073 	enum req_op op = req_op(req);
1074 	u32 ublk_op;
1075 
1076 	if (!ublk_queue_is_zoned(ubq) &&
1077 	    (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
1078 		return BLK_STS_IOERR;
1079 
1080 	switch (req_op(req)) {
1081 	case REQ_OP_READ:
1082 		ublk_op = UBLK_IO_OP_READ;
1083 		break;
1084 	case REQ_OP_WRITE:
1085 		ublk_op = UBLK_IO_OP_WRITE;
1086 		break;
1087 	case REQ_OP_FLUSH:
1088 		ublk_op = UBLK_IO_OP_FLUSH;
1089 		break;
1090 	case REQ_OP_DISCARD:
1091 		ublk_op = UBLK_IO_OP_DISCARD;
1092 		break;
1093 	case REQ_OP_WRITE_ZEROES:
1094 		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
1095 		break;
1096 	default:
1097 		if (ublk_queue_is_zoned(ubq))
1098 			return ublk_setup_iod_zoned(ubq, req);
1099 		return BLK_STS_IOERR;
1100 	}
1101 
1102 	/* need to translate since the kernel-internal values may change */
1103 	iod->op_flags = ublk_op | ublk_req_build_flags(req);
1104 	iod->nr_sectors = blk_rq_sectors(req);
1105 	iod->start_sector = blk_rq_pos(req);
1106 	iod->addr = io->addr;
1107 
1108 	return BLK_STS_OK;
1109 }
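/*
 * Illustration (annotation, made-up example values): for a 4 KiB WRITE at
 * byte offset 1 MiB, the descriptor filled above would contain roughly:
 *
 *   iod->op_flags     = UBLK_IO_OP_WRITE | <UBLK_IO_F_* flags>
 *   iod->start_sector = 2048   (512-byte units)
 *   iod->nr_sectors   = 8
 *   iod->addr         = buffer address previously provided by the server,
 *                       only meaningful when the driver copies the data
 *                       itself (i.e. neither user copy, zero copy nor auto
 *                       buffer registration is enabled)
 */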
1110 
1111 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
1112 		struct io_uring_cmd *ioucmd)
1113 {
1114 	return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
1115 }
1116 
1117 /* todo: handle partial completion */
1118 static inline void __ublk_complete_rq(struct request *req)
1119 {
1120 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
1121 	struct ublk_io *io = &ubq->ios[req->tag];
1122 	unsigned int unmapped_bytes;
1123 	blk_status_t res = BLK_STS_OK;
1124 
1125 	/* fail the read IO if nothing was read */
1126 	if (!io->res && req_op(req) == REQ_OP_READ)
1127 		io->res = -EIO;
1128 
1129 	if (io->res < 0) {
1130 		res = errno_to_blk_status(io->res);
1131 		goto exit;
1132 	}
1133 
1134 	/*
1135 	 * FLUSH, DISCARD and WRITE_ZEROES usually don't return a byte count,
1136 	 * so end them directly.
1137 	 *
1138 	 * None of them needs unmapping.
1139 	 */
1140 	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1141 	    req_op(req) != REQ_OP_DRV_IN)
1142 		goto exit;
1143 
1144 	/* for READ request, writing data in iod->addr to rq buffers */
1145 	unmapped_bytes = ublk_unmap_io(ubq, req, io);
1146 
1147 	/*
1148 	 * Extremely unlikely since the data was filled in just before
1149 	 *
1150 	 * Simply trim the result for this unlikely case.
1151 	 */
1152 	if (unlikely(unmapped_bytes < io->res))
1153 		io->res = unmapped_bytes;
1154 
1155 	if (blk_update_request(req, BLK_STS_OK, io->res))
1156 		blk_mq_requeue_request(req, true);
1157 	else
1158 		__blk_mq_end_request(req, BLK_STS_OK);
1159 
1160 	return;
1161 exit:
1162 	blk_mq_end_request(req, res);
1163 }
1164 
1165 static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
1166 						     struct request *req)
1167 {
1168 	/* read cmd first because req will overwrite it */
1169 	struct io_uring_cmd *cmd = io->cmd;
1170 
1171 	/* mark this cmd owned by ublksrv */
1172 	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1173 
1174 	/*
1175 	 * clear ACTIVE since we are done with this sqe/cmd slot
1176 	 * We can only accept an io cmd when it is not active.
1177 	 */
1178 	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1179 
1180 	io->req = req;
1181 	return cmd;
1182 }
1183 
1184 static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
1185 				 int res, unsigned issue_flags)
1186 {
1187 	struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
1188 
1189 	/* tell ublksrv one io request is coming */
1190 	io_uring_cmd_done(cmd, res, 0, issue_flags);
1191 }
1192 
1193 #define UBLK_REQUEUE_DELAY_MS	3
1194 
1195 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1196 		struct request *rq)
1197 {
1198 	/* We cannot process this rq, so requeue or fail it. */
1199 	if (ublk_nosrv_dev_should_queue_io(ubq->dev))
1200 		blk_mq_requeue_request(rq, false);
1201 	else
1202 		blk_mq_end_request(rq, BLK_STS_IOERR);
1203 }
1204 
1205 static void
1206 ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
1207 {
1208 	unsigned tag = io - ubq->ios;
1209 	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
1210 
1211 	iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
1212 }
1213 
1214 static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
1215 			      struct ublk_io *io, unsigned int issue_flags)
1216 {
1217 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
1218 	int ret;
1219 
1220 	ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
1221 				      pdu->buf.index, issue_flags);
1222 	if (ret) {
1223 		if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
1224 			ublk_auto_buf_reg_fallback(ubq, io);
1225 			return true;
1226 		}
1227 		blk_mq_end_request(req, BLK_STS_IOERR);
1228 		return false;
1229 	}
1230 
1231 	io->task_registered_buffers = 1;
1232 	io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
1233 	/* save the registered buffer index for unregistering later */
1234 	io->buf_index = pdu->buf.index;
1235 	io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
1236 	return true;
1237 }
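/*
 * Illustration (summary of the helpers above): with UBLK_F_AUTO_BUF_REG the
 * request's bvec buffer is registered into the server's io_uring buffer
 * table at the index requested by the server (pdu->buf.index). If
 * registration fails and the server asked for UBLK_AUTO_BUF_REG_FALLBACK,
 * the iod is marked with UBLK_IO_F_NEED_REG_BUF instead, so the server knows
 * it has to provide a buffer itself; otherwise the request fails with
 * BLK_STS_IOERR.
 */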
1238 
1239 static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
1240 				   struct request *req, struct ublk_io *io,
1241 				   unsigned int issue_flags)
1242 {
1243 	ublk_init_req_ref(ubq, io);
1244 	if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
1245 		return ublk_auto_buf_reg(ubq, req, io, issue_flags);
1246 
1247 	return true;
1248 }
1249 
1250 static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
1251 			  struct ublk_io *io)
1252 {
1253 	unsigned mapped_bytes = ublk_map_io(ubq, req, io);
1254 
1255 	/* partially mapped, update io descriptor */
1256 	if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1257 		/*
1258 		 * Nothing mapped, retry until we succeed.
1259 		 *
1260 		 * We may never succeed in mapping any bytes here because
1261 		 * of OOM. TODO: reserve one buffer with a single pinned page
1262 		 * to provide a forward-progress guarantee.
1263 		 */
1264 		if (unlikely(!mapped_bytes)) {
1265 			blk_mq_requeue_request(req, false);
1266 			blk_mq_delay_kick_requeue_list(req->q,
1267 					UBLK_REQUEUE_DELAY_MS);
1268 			return false;
1269 		}
1270 
1271 		ublk_get_iod(ubq, req->tag)->nr_sectors =
1272 			mapped_bytes >> 9;
1273 	}
1274 
1275 	return true;
1276 }
1277 
1278 static void ublk_dispatch_req(struct ublk_queue *ubq,
1279 			      struct request *req,
1280 			      unsigned int issue_flags)
1281 {
1282 	int tag = req->tag;
1283 	struct ublk_io *io = &ubq->ios[tag];
1284 
1285 	pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
1286 			__func__, ubq->q_id, req->tag, io->flags,
1287 			ublk_get_iod(ubq, req->tag)->addr);
1288 
1289 	/*
1290 	 * Task is exiting if either:
1291 	 *
1292 	 * (1) current != io->task.
1293 	 * io_uring_cmd_complete_in_task() tries to run task_work
1294 	 * in a workqueue if cmd's task is PF_EXITING.
1295 	 *
1296 	 * (2) current->flags & PF_EXITING.
1297 	 */
1298 	if (unlikely(current != io->task || current->flags & PF_EXITING)) {
1299 		__ublk_abort_rq(ubq, req);
1300 		return;
1301 	}
1302 
1303 	if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1304 		/*
1305 		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
1306 		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1307 		 * and notify it.
1308 		 */
1309 		io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1310 		pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
1311 				__func__, ubq->q_id, req->tag, io->flags);
1312 		ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
1313 				     issue_flags);
1314 		return;
1315 	}
1316 
1317 	if (!ublk_start_io(ubq, req, io))
1318 		return;
1319 
1320 	if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
1321 		ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
1322 }
1323 
1324 static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
1325 			   unsigned int issue_flags)
1326 {
1327 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1328 	struct ublk_queue *ubq = pdu->ubq;
1329 
1330 	ublk_dispatch_req(ubq, pdu->req, issue_flags);
1331 }
1332 
1333 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1334 {
1335 	struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
1336 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1337 
1338 	pdu->req = rq;
1339 	io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
1340 }
1341 
1342 static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
1343 		unsigned int issue_flags)
1344 {
1345 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1346 	struct request *rq = pdu->req_list;
1347 	struct request *next;
1348 
1349 	do {
1350 		next = rq->rq_next;
1351 		rq->rq_next = NULL;
1352 		ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
1353 		rq = next;
1354 	} while (rq);
1355 }
1356 
1357 static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
1358 {
1359 	struct io_uring_cmd *cmd = io->cmd;
1360 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1361 
1362 	pdu->req_list = rq_list_peek(l);
1363 	rq_list_init(l);
1364 	io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
1365 }
1366 
1367 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1368 {
1369 	struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1370 	struct ublk_io *io = &ubq->ios[rq->tag];
1371 
1372 	if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1373 		send_sig(SIGKILL, io->task, 0);
1374 		return BLK_EH_DONE;
1375 	}
1376 
1377 	return BLK_EH_RESET_TIMER;
1378 }
1379 
1380 static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
1381 				  bool check_cancel)
1382 {
1383 	blk_status_t res;
1384 
1385 	if (unlikely(ubq->fail_io))
1386 		return BLK_STS_TARGET;
1387 
1388 	/* With the recovery feature enabled, force_abort is set in
1389 	 * ublk_stop_dev() before calling del_gendisk(). We have to
1390 	 * abort all requeued and new rqs here to let del_gendisk()
1391 	 * move on. Besides, we must not call io_uring_cmd_complete_in_task()
1392 	 * here, to avoid a UAF on the io_uring ctx.
1393 	 *
1394 	 * Note: force_abort is guaranteed to be seen because it is set
1395 	 * before the request queue is unquiesced.
1396 	 */
1397 	if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
1398 		return BLK_STS_IOERR;
1399 
1400 	if (check_cancel && unlikely(ubq->canceling))
1401 		return BLK_STS_IOERR;
1402 
1403 	/* fill iod to slot in io cmd buffer */
1404 	res = ublk_setup_iod(ubq, rq);
1405 	if (unlikely(res != BLK_STS_OK))
1406 		return BLK_STS_IOERR;
1407 
1408 	blk_mq_start_request(rq);
1409 	return BLK_STS_OK;
1410 }
1411 
1412 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1413 		const struct blk_mq_queue_data *bd)
1414 {
1415 	struct ublk_queue *ubq = hctx->driver_data;
1416 	struct request *rq = bd->rq;
1417 	blk_status_t res;
1418 
1419 	res = ublk_prep_req(ubq, rq, false);
1420 	if (res != BLK_STS_OK)
1421 		return res;
1422 
1423 	/*
1424 	 * ->canceling has to be handled after ->force_abort and ->fail_io
1425 	 * are dealt with, otherwise this request may not be failed in case
1426 	 * of recovery, causing a hang when deleting the disk
1427 	 */
1428 	if (unlikely(ubq->canceling)) {
1429 		__ublk_abort_rq(ubq, rq);
1430 		return BLK_STS_OK;
1431 	}
1432 
1433 	ublk_queue_cmd(ubq, rq);
1434 	return BLK_STS_OK;
1435 }
1436 
1437 static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
1438 					     const struct ublk_io *io2)
1439 {
1440 	return (io_uring_cmd_ctx_handle(io->cmd) ==
1441 		io_uring_cmd_ctx_handle(io2->cmd)) &&
1442 		(io->task == io2->task);
1443 }
1444 
1445 static void ublk_queue_rqs(struct rq_list *rqlist)
1446 {
1447 	struct rq_list requeue_list = { };
1448 	struct rq_list submit_list = { };
1449 	struct ublk_io *io = NULL;
1450 	struct request *req;
1451 
1452 	while ((req = rq_list_pop(rqlist))) {
1453 		struct ublk_queue *this_q = req->mq_hctx->driver_data;
1454 		struct ublk_io *this_io = &this_q->ios[req->tag];
1455 
1456 		if (io && !ublk_belong_to_same_batch(io, this_io) &&
1457 				!rq_list_empty(&submit_list))
1458 			ublk_queue_cmd_list(io, &submit_list);
1459 		io = this_io;
1460 
1461 		if (ublk_prep_req(this_q, req, true) == BLK_STS_OK)
1462 			rq_list_add_tail(&submit_list, req);
1463 		else
1464 			rq_list_add_tail(&requeue_list, req);
1465 	}
1466 
1467 	if (!rq_list_empty(&submit_list))
1468 		ublk_queue_cmd_list(io, &submit_list);
1469 	*rqlist = requeue_list;
1470 }
1471 
1472 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1473 		unsigned int hctx_idx)
1474 {
1475 	struct ublk_device *ub = driver_data;
1476 	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1477 
1478 	hctx->driver_data = ubq;
1479 	return 0;
1480 }
1481 
1482 static const struct blk_mq_ops ublk_mq_ops = {
1483 	.queue_rq       = ublk_queue_rq,
1484 	.queue_rqs      = ublk_queue_rqs,
1485 	.init_hctx	= ublk_init_hctx,
1486 	.timeout	= ublk_timeout,
1487 };
1488 
1489 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
1490 {
1491 	int i;
1492 
1493 	/* All old ioucmds have to be completed */
1494 	ubq->nr_io_ready = 0;
1495 
1496 	for (i = 0; i < ubq->q_depth; i++) {
1497 		struct ublk_io *io = &ubq->ios[i];
1498 
1499 		/*
1500 		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
1501 		 * io->cmd
1502 		 */
1503 		io->flags &= UBLK_IO_FLAG_CANCELED;
1504 		io->cmd = NULL;
1505 		io->addr = 0;
1506 
1507 		/*
1508 		 * The old task is PF_EXITING, put it now
1509 		 *
1510 		 * It could be NULL in the case of closing a quiesced
1511 		 * device.
1512 		 */
1513 		if (io->task) {
1514 			put_task_struct(io->task);
1515 			io->task = NULL;
1516 		}
1517 
1518 		WARN_ON_ONCE(refcount_read(&io->ref));
1519 		WARN_ON_ONCE(io->task_registered_buffers);
1520 	}
1521 }
1522 
1523 static int ublk_ch_open(struct inode *inode, struct file *filp)
1524 {
1525 	struct ublk_device *ub = container_of(inode->i_cdev,
1526 			struct ublk_device, cdev);
1527 
1528 	if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1529 		return -EBUSY;
1530 	filp->private_data = ub;
1531 	return 0;
1532 }
1533 
1534 static void ublk_reset_ch_dev(struct ublk_device *ub)
1535 {
1536 	int i;
1537 
1538 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1539 		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
1540 
1541 	/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
1542 	ub->mm = NULL;
1543 	ub->nr_queues_ready = 0;
1544 	ub->nr_privileged_daemon = 0;
1545 }
1546 
1547 static struct gendisk *ublk_get_disk(struct ublk_device *ub)
1548 {
1549 	struct gendisk *disk;
1550 
1551 	spin_lock(&ub->lock);
1552 	disk = ub->ub_disk;
1553 	if (disk)
1554 		get_device(disk_to_dev(disk));
1555 	spin_unlock(&ub->lock);
1556 
1557 	return disk;
1558 }
1559 
1560 static void ublk_put_disk(struct gendisk *disk)
1561 {
1562 	if (disk)
1563 		put_device(disk_to_dev(disk));
1564 }
1565 
1566 /*
1567  * Use this function to ensure that ->canceling is consistently set for
1568  * the device and all queues. Do not set these flags directly.
1569  *
1570  * Caller must ensure that:
1571  * - cancel_mutex is held. This ensures that there is no concurrent
1572  *   access to ub->canceling and no concurrent writes to ubq->canceling.
1573  * - there are no concurrent reads of ubq->canceling from the queue_rq
1574  *   path. This can be done by quiescing the queue, or through other
1575  *   means.
1576  */
1577 static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
1578 	__must_hold(&ub->cancel_mutex)
1579 {
1580 	int i;
1581 
1582 	ub->canceling = canceling;
1583 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1584 		ublk_get_queue(ub, i)->canceling = canceling;
1585 }
1586 
1587 static int ublk_ch_release(struct inode *inode, struct file *filp)
1588 {
1589 	struct ublk_device *ub = filp->private_data;
1590 	struct gendisk *disk;
1591 	int i;
1592 
1593 	/*
1594 	 * The disk isn't attached yet: either the device isn't live, or it has
1595 	 * been removed already, so we needn't do anything
1596 	 */
1597 	disk = ublk_get_disk(ub);
1598 	if (!disk)
1599 		goto out;
1600 
1601 	/*
1602 	 * All uring_cmds are done now, so abort any requests outstanding to
1603 	 * the ublk server
1604 	 *
1605 	 * This can be done in a lockless way because the ublk server is
1606 	 * gone
1607 	 *
1608 	 * More importantly, we have to provide a forward-progress guarantee
1609 	 * without holding ub->mutex, otherwise a control task grabbing
1610 	 * ub->mutex triggers a deadlock
1611 	 *
1612 	 * All requests may be inflight, so ->canceling may not be set; set
1613 	 * it now.
1614 	 */
1615 	mutex_lock(&ub->cancel_mutex);
1616 	ublk_set_canceling(ub, true);
1617 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1618 		ublk_abort_queue(ub, ublk_get_queue(ub, i));
1619 	mutex_unlock(&ub->cancel_mutex);
1620 	blk_mq_kick_requeue_list(disk->queue);
1621 
1622 	/*
1623 	 * All inflight requests have been completed or requeued and any new
1624 	 * request will be failed or requeued via `->canceling` now, so it is
1625 	 * fine to grab ub->mutex now.
1626 	 */
1627 	mutex_lock(&ub->mutex);
1628 
1629 	/* double check after grabbing lock */
1630 	if (!ub->ub_disk)
1631 		goto unlock;
1632 
1633 	/*
1634 	 * Transition the device to the nosrv state. What exactly this
1635 	 * means depends on the recovery flags
1636 	 */
1637 	blk_mq_quiesce_queue(disk->queue);
1638 	if (ublk_nosrv_should_stop_dev(ub)) {
1639 		/*
1640 		 * Allow any pending/future I/O to pass through quickly
1641 		 * with an error. This is needed because del_gendisk
1642 		 * waits for all pending I/O to complete
1643 		 */
1644 		for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1645 			ublk_get_queue(ub, i)->force_abort = true;
1646 		blk_mq_unquiesce_queue(disk->queue);
1647 
1648 		ublk_stop_dev_unlocked(ub);
1649 	} else {
1650 		if (ublk_nosrv_dev_should_queue_io(ub)) {
1651 			/* ->canceling is set and all requests are aborted */
1652 			ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1653 		} else {
1654 			ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
1655 			for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1656 				ublk_get_queue(ub, i)->fail_io = true;
1657 		}
1658 		blk_mq_unquiesce_queue(disk->queue);
1659 	}
1660 unlock:
1661 	mutex_unlock(&ub->mutex);
1662 	ublk_put_disk(disk);
1663 
1664 	/* all uring_cmds have been completed now, reset device & ubq */
1665 	ublk_reset_ch_dev(ub);
1666 out:
1667 	clear_bit(UB_STATE_OPEN, &ub->state);
1668 	return 0;
1669 }
1670 
1671 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1672 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1673 {
1674 	struct ublk_device *ub = filp->private_data;
1675 	size_t sz = vma->vm_end - vma->vm_start;
1676 	unsigned max_sz = ublk_max_cmd_buf_size();
1677 	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1678 	int q_id, ret = 0;
1679 
1680 	spin_lock(&ub->lock);
1681 	if (!ub->mm)
1682 		ub->mm = current->mm;
1683 	if (current->mm != ub->mm)
1684 		ret = -EINVAL;
1685 	spin_unlock(&ub->lock);
1686 
1687 	if (ret)
1688 		return ret;
1689 
1690 	if (vma->vm_flags & VM_WRITE)
1691 		return -EPERM;
1692 
1693 	end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1694 	if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1695 		return -EINVAL;
1696 
1697 	q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1698 	pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1699 			__func__, q_id, current->pid, vma->vm_start,
1700 			phys_off, (unsigned long)sz);
1701 
1702 	if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1703 		return -EINVAL;
1704 
1705 	pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1706 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1707 }
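/*
 * Illustration (annotation, not part of the original code): a ublk server
 * typically maps the descriptor array of queue q_id roughly as follows,
 * mirroring the offset and size checks above (constants from
 * uapi/linux/ublk_cmd.h; "max_cmd_buf_size" and "queue_cmd_buf_size" are
 * made-up names corresponding to ublk_max_cmd_buf_size() and
 * ublk_queue_cmd_buf_size() on the kernel side):
 *
 *   off_t off = UBLKSRV_CMD_BUF_OFFSET + q_id * max_cmd_buf_size;
 *   struct ublksrv_io_desc *iods =
 *           mmap(NULL, queue_cmd_buf_size, PROT_READ, MAP_SHARED,
 *                ublk_char_fd, off);
 *
 * The mapping is read-only for the server since VM_WRITE is rejected above,
 * and queue_cmd_buf_size is the page-rounded size of q_depth descriptors.
 */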
1708 
1709 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1710 		struct request *req)
1711 {
1712 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1713 
1714 	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
1715 		blk_mq_requeue_request(req, false);
1716 	else {
1717 		io->res = -EIO;
1718 		__ublk_complete_rq(req);
1719 	}
1720 }
1721 
1722 /*
1723  * Called from the ublk char device release handler, after all uring_cmds
1724  * are done; meanwhile the request queue is effectively "quiesced" since all
1725  * inflight requests can't be completed because the ublk server is dead.
1726  *
1727  * So no one can hold our request IO reference any more; simply ignore the
1728  * reference and complete the request immediately
1729  */
1730 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1731 {
1732 	int i;
1733 
1734 	for (i = 0; i < ubq->q_depth; i++) {
1735 		struct ublk_io *io = &ubq->ios[i];
1736 
1737 		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1738 			__ublk_fail_req(ubq, io, io->req);
1739 	}
1740 }
1741 
1742 static void ublk_start_cancel(struct ublk_device *ub)
1743 {
1744 	struct gendisk *disk = ublk_get_disk(ub);
1745 
1746 	/* Our disk is already gone */
1747 	if (!disk)
1748 		return;
1749 
1750 	mutex_lock(&ub->cancel_mutex);
1751 	if (ub->canceling)
1752 		goto out;
1753 	/*
1754 	 * Now we are serialized with ublk_queue_rq()
1755 	 *
1756 	 * Make sure that ubq->canceling is set while the queue is quiesced,
1757 	 * because ublk_queue_rq() has to rely on this flag to avoid touching
1758 	 * a completed uring_cmd
1759 	 */
1760 	blk_mq_quiesce_queue(disk->queue);
1761 	ublk_set_canceling(ub, true);
1762 	blk_mq_unquiesce_queue(disk->queue);
1763 out:
1764 	mutex_unlock(&ub->cancel_mutex);
1765 	ublk_put_disk(disk);
1766 }
1767 
1768 static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
1769 		unsigned int issue_flags)
1770 {
1771 	struct ublk_io *io = &ubq->ios[tag];
1772 	struct ublk_device *ub = ubq->dev;
1773 	struct request *req;
1774 	bool done;
1775 
1776 	if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
1777 		return;
1778 
1779 	/*
1780 	 * Don't try to cancel this command if the request has been started,
1781 	 * to avoid a race between io_uring_cmd_done() and
1782 	 * io_uring_cmd_complete_in_task().
1783 	 *
1784 	 * Either the started request will be aborted via __ublk_abort_rq()
1785 	 * and this uring_cmd canceled next time, or it will be completed in
1786 	 * the task work function ublk_dispatch_req(), because io_uring
1787 	 * guarantees that ublk_dispatch_req() is always called
1788 	 */
1789 	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1790 	if (req && blk_mq_request_started(req) && req->tag == tag)
1791 		return;
1792 
1793 	spin_lock(&ubq->cancel_lock);
1794 	done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
1795 	if (!done)
1796 		io->flags |= UBLK_IO_FLAG_CANCELED;
1797 	spin_unlock(&ubq->cancel_lock);
1798 
1799 	if (!done)
1800 		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
1801 }
1802 
1803 /*
1804  * The ublk char device won't be closed when the cancel fn is called, so
1805  * both the ublk device and queue are guaranteed to be live
1806  *
1807  * Two-stage cancel:
1808  *
1809  * - complete every active uring_cmd in ->cancel_fn()
1810  *
1811  * - abort inflight ublk IO requests in the ublk char device release
1812  *   handler, which depends on the 1st stage because the device can only
1813  *   be closed once all uring_cmds are done
1814  *
1815  * Do _not_ try to acquire ub->mutex before all inflight requests are
1816  * aborted, otherwise a deadlock may occur.
1817  */
1818 static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
1819 		unsigned int issue_flags)
1820 {
1821 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1822 	struct ublk_queue *ubq = pdu->ubq;
1823 	struct task_struct *task;
1824 	struct ublk_io *io;
1825 
1826 	if (WARN_ON_ONCE(!ubq))
1827 		return;
1828 
1829 	if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))
1830 		return;
1831 
1832 	task = io_uring_cmd_get_task(cmd);
1833 	io = &ubq->ios[pdu->tag];
1834 	if (WARN_ON_ONCE(task && task != io->task))
1835 		return;
1836 
1837 	ublk_start_cancel(ubq->dev);
1838 
1839 	WARN_ON_ONCE(io->cmd != cmd);
1840 	ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
1841 }
1842 
1843 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1844 {
1845 	return ubq->nr_io_ready == ubq->q_depth;
1846 }
1847 
1848 static void ublk_cancel_queue(struct ublk_queue *ubq)
1849 {
1850 	int i;
1851 
1852 	for (i = 0; i < ubq->q_depth; i++)
1853 		ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
1854 }
1855 
1856 /* Cancel all pending commands, must be called after del_gendisk() returns */
1857 static void ublk_cancel_dev(struct ublk_device *ub)
1858 {
1859 	int i;
1860 
1861 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1862 		ublk_cancel_queue(ublk_get_queue(ub, i));
1863 }
1864 
1865 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1866 {
1867 	bool *idle = data;
1868 
1869 	if (blk_mq_request_started(rq)) {
1870 		*idle = false;
1871 		return false;
1872 	}
1873 	return true;
1874 }
1875 
1876 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1877 {
1878 	bool idle;
1879 
1880 	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1881 	while (true) {
1882 		idle = true;
1883 		blk_mq_tagset_busy_iter(&ub->tag_set,
1884 				ublk_check_inflight_rq, &idle);
1885 		if (idle)
1886 			break;
1887 		msleep(UBLK_REQUEUE_DELAY_MS);
1888 	}
1889 }
1890 
1891 static void ublk_force_abort_dev(struct ublk_device *ub)
1892 {
1893 	int i;
1894 
1895 	pr_devel("%s: force abort ub: dev_id %d state %s\n",
1896 			__func__, ub->dev_info.dev_id,
1897 			ub->dev_info.state == UBLK_S_DEV_LIVE ?
1898 			"LIVE" : "QUIESCED");
1899 	blk_mq_quiesce_queue(ub->ub_disk->queue);
1900 	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1901 		ublk_wait_tagset_rqs_idle(ub);
1902 
1903 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1904 		ublk_get_queue(ub, i)->force_abort = true;
1905 	blk_mq_unquiesce_queue(ub->ub_disk->queue);
1906 	/* We may have requeued some rqs in ublk_quiesce_queue() */
1907 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
1908 }
1909 
1910 static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
1911 {
1912 	struct gendisk *disk;
1913 
1914 	/* Sync with ublk_abort_queue() by holding the lock */
1915 	spin_lock(&ub->lock);
1916 	disk = ub->ub_disk;
1917 	ub->dev_info.state = UBLK_S_DEV_DEAD;
1918 	ub->dev_info.ublksrv_pid = -1;
1919 	ub->ub_disk = NULL;
1920 	spin_unlock(&ub->lock);
1921 
1922 	return disk;
1923 }
1924 
1925 static void ublk_stop_dev_unlocked(struct ublk_device *ub)
1926 	__must_hold(&ub->mutex)
1927 {
1928 	struct gendisk *disk;
1929 
1930 	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1931 		return;
1932 
1933 	if (ublk_nosrv_dev_should_queue_io(ub))
1934 		ublk_force_abort_dev(ub);
1935 	del_gendisk(ub->ub_disk);
1936 	disk = ublk_detach_disk(ub);
1937 	put_disk(disk);
1938 }
1939 
1940 static void ublk_stop_dev(struct ublk_device *ub)
1941 {
1942 	mutex_lock(&ub->mutex);
1943 	ublk_stop_dev_unlocked(ub);
1944 	mutex_unlock(&ub->mutex);
1945 	ublk_cancel_dev(ub);
1946 }
1947 
1948 /* reset ublk io_uring queue & io flags */
1949 static void ublk_reset_io_flags(struct ublk_device *ub)
1950 {
1951 	int i, j;
1952 
1953 	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1954 		struct ublk_queue *ubq = ublk_get_queue(ub, i);
1955 
1956 		/* UBLK_IO_FLAG_CANCELED can be cleared now */
1957 		spin_lock(&ubq->cancel_lock);
1958 		for (j = 0; j < ubq->q_depth; j++)
1959 			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
1960 		spin_unlock(&ubq->cancel_lock);
1961 		ubq->fail_io = false;
1962 	}
1963 	mutex_lock(&ub->cancel_mutex);
1964 	ublk_set_canceling(ub, false);
1965 	mutex_unlock(&ub->cancel_mutex);
1966 }
1967 
1968 /* device can only be started after all IOs are ready */
1969 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1970 	__must_hold(&ub->mutex)
1971 {
1972 	ubq->nr_io_ready++;
1973 	if (ublk_queue_ready(ubq)) {
1974 		ub->nr_queues_ready++;
1975 
1976 		if (capable(CAP_SYS_ADMIN))
1977 			ub->nr_privileged_daemon++;
1978 	}
1979 
1980 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
1981 		/* now we are ready for handling ublk io request */
1982 		ublk_reset_io_flags(ub);
1983 		complete_all(&ub->completion);
1984 	}
1985 }
1986 
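/*
 * Only ioctl-encoded ('u' type) command opcodes are accepted; legacy plain
 * opcodes (type 0) are allowed only when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES
 * is enabled.
 */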
1987 static inline int ublk_check_cmd_op(u32 cmd_op)
1988 {
1989 	u32 ioc_type = _IOC_TYPE(cmd_op);
1990 
1991 	if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1992 		return -EOPNOTSUPP;
1993 
1994 	if (ioc_type != 'u' && ioc_type != 0)
1995 		return -EOPNOTSUPP;
1996 
1997 	return 0;
1998 }
1999 
2000 static inline void ublk_fill_io_cmd(struct ublk_io *io,
2001 		struct io_uring_cmd *cmd, unsigned long buf_addr)
2002 {
2003 	io->cmd = cmd;
2004 	io->flags |= UBLK_IO_FLAG_ACTIVE;
2005 	io->addr = buf_addr;
2006 }
2007 
2008 static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
2009 				    unsigned int issue_flags,
2010 				    struct ublk_queue *ubq, unsigned int tag)
2011 {
2012 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
2013 
2014 	/*
2015 	 * Safe to refer to @ubq since the ublk_queue won't go away until its
2016 	 * commands are completed
2017 	 */
2018 	pdu->ubq = ubq;
2019 	pdu->tag = tag;
2020 	io_uring_cmd_mark_cancelable(cmd, issue_flags);
2021 }
2022 
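/*
 * Decode the auto buffer registration descriptor passed in sqe->addr and
 * reject any use of reserved fields or unknown flag bits.
 */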
2023 static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
2024 {
2025 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
2026 
2027 	pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
2028 
2029 	if (pdu->buf.reserved0 || pdu->buf.reserved1)
2030 		return -EINVAL;
2031 
2032 	if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
2033 		return -EINVAL;
2034 	return 0;
2035 }
2036 
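/*
 * Release callback for buffers registered via io_buffer_register_bvec():
 * either drop one task-local registered-buffer count (daemon-task fast path)
 * or drop a regular request reference.
 */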
2037 static void ublk_io_release(void *priv)
2038 {
2039 	struct request *rq = priv;
2040 	struct ublk_queue *ubq = rq->mq_hctx->driver_data;
2041 	struct ublk_io *io = &ubq->ios[rq->tag];
2042 
2043 	/*
2044 	 * task_registered_buffers may be 0 if buffers were registered off task
2045 	 * but unregistered on task, or after UBLK_IO_COMMIT_AND_FETCH_REQ.
2046 	 */
2047 	if (current == io->task && io->task_registered_buffers)
2048 		io->task_registered_buffers--;
2049 	else
2050 		ublk_put_req_ref(io, rq);
2051 }
2052 
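/*
 * Thread-safe buffer registration path: take a full request reference via
 * __ublk_check_and_get_req(), so this can be invoked from any task.
 */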
2053 static int ublk_register_io_buf(struct io_uring_cmd *cmd,
2054 				const struct ublk_queue *ubq,
2055 				struct ublk_io *io,
2056 				unsigned int index, unsigned int issue_flags)
2057 {
2058 	struct ublk_device *ub = cmd->file->private_data;
2059 	struct request *req;
2060 	int ret;
2061 
2062 	if (!ublk_support_zero_copy(ubq))
2063 		return -EINVAL;
2064 
2065 	req = __ublk_check_and_get_req(ub, ubq, io, 0);
2066 	if (!req)
2067 		return -EINVAL;
2068 
2069 	ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
2070 				      issue_flags);
2071 	if (ret) {
2072 		ublk_put_req_ref(io, req);
2073 		return ret;
2074 	}
2075 
2076 	return 0;
2077 }
2078 
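/*
 * Daemon-task registration path: instead of taking an atomic request
 * reference, bump the task-local task_registered_buffers count, falling back
 * to ublk_register_io_buf() if that counter would reach the refcount's
 * initial value.
 */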
2079 static int
2080 ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
2081 			    const struct ublk_queue *ubq, struct ublk_io *io,
2082 			    unsigned index, unsigned issue_flags)
2083 {
2084 	unsigned new_registered_buffers;
2085 	struct request *req = io->req;
2086 	int ret;
2087 
2088 	/*
2089 	 * Ensure there are still references for ublk_sub_req_ref() to release.
2090 	 * If not, fall back on the thread-safe buffer registration.
2091 	 */
2092 	new_registered_buffers = io->task_registered_buffers + 1;
2093 	if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
2094 		return ublk_register_io_buf(cmd, ubq, io, index, issue_flags);
2095 
2096 	if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req))
2097 		return -EINVAL;
2098 
2099 	ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
2100 				      issue_flags);
2101 	if (ret)
2102 		return ret;
2103 
2104 	io->task_registered_buffers = new_registered_buffers;
2105 	return 0;
2106 }
2107 
2108 static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
2109 				  const struct ublk_device *ub,
2110 				  unsigned int index, unsigned int issue_flags)
2111 {
2112 	if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY))
2113 		return -EINVAL;
2114 
2115 	return io_buffer_unregister_bvec(cmd, index, issue_flags);
2116 }
2117 
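/*
 * Handle UBLK_IO_FETCH_REQ: validate the buffer address against the queue's
 * copy mode, record the issuing task as this io's daemon, and mark the io
 * ready. ub->mutex serializes this against device start and other FETCHes.
 */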
2118 static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
2119 		      struct ublk_io *io, __u64 buf_addr)
2120 {
2121 	struct ublk_device *ub = ubq->dev;
2122 	int ret = 0;
2123 
2124 	/*
2125 	 * When handling the FETCH command for setting up the ublk uring queue,
2126 	 * ub->mutex is the innermost lock, and we won't block while handling
2127 	 * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
2128 	 */
2129 	mutex_lock(&ub->mutex);
2130 	/* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
2131 	if (ublk_queue_ready(ubq)) {
2132 		ret = -EBUSY;
2133 		goto out;
2134 	}
2135 
2136 	/* allow each command to be FETCHed at most once */
2137 	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
2138 		ret = -EINVAL;
2139 		goto out;
2140 	}
2141 
2142 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
2143 
2144 	if (ublk_need_map_io(ubq)) {
2145 		/*
2146 		 * FETCH_RQ has to provide an IO buffer if NEED GET
2147 		 * DATA is not enabled
2148 		 */
2149 		if (!buf_addr && !ublk_need_get_data(ubq))
2150 			goto out;
2151 	} else if (buf_addr) {
2152 		/* User copy requires addr to be unset */
2153 		ret = -EINVAL;
2154 		goto out;
2155 	}
2156 
2157 	if (ublk_support_auto_buf_reg(ubq)) {
2158 		ret = ublk_set_auto_buf_reg(cmd);
2159 		if (ret)
2160 			goto out;
2161 	}
2162 
2163 	ublk_fill_io_cmd(io, cmd, buf_addr);
2164 	WRITE_ONCE(io->task, get_task_struct(current));
2165 	ublk_mark_io_ready(ub, ubq);
2166 out:
2167 	mutex_unlock(&ub->mutex);
2168 	return ret;
2169 }
2170 
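/*
 * Handle UBLK_IO_COMMIT_AND_FETCH_REQ: validate the commit buffer address,
 * refresh the auto buffer registration when enabled, hand the cmd slot back
 * to the driver and complete the backing request with the server's result.
 */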
2171 static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
2172 				 struct ublk_io *io, struct io_uring_cmd *cmd,
2173 				 const struct ublksrv_io_cmd *ub_cmd,
2174 				 unsigned int issue_flags)
2175 {
2176 	struct request *req = io->req;
2177 
2178 	if (ublk_need_map_io(ubq)) {
2179 		/*
2180 		 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if
2181 		 * NEED GET DATA is not enabled or it is a READ IO.
2182 		 */
2183 		if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
2184 					req_op(req) == REQ_OP_READ))
2185 			return -EINVAL;
2186 	} else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
2187 		/*
2188 		 * User copy requires addr to be unset when the command is
2189 		 * not zone append
2190 		 */
2191 		return -EINVAL;
2192 	}
2193 
2194 	if (ublk_support_auto_buf_reg(ubq)) {
2195 		int ret;
2196 
2197 		/*
2198 		 * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
2199 		 * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
2200 		 * `io_ring_ctx`.
2201 		 *
2202 		 * If this uring_cmd's io_ring_ctx isn't the same as the one
2203 		 * used for registering the buffer, it is the ublk server's
2204 		 * responsibility to unregister the buffer, otherwise this
2205 		 * ublk request gets stuck.
2206 		 */
2207 		if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
2208 			if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
2209 				io_buffer_unregister_bvec(cmd, io->buf_index,
2210 						issue_flags);
2211 			io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
2212 		}
2213 
2214 		ret = ublk_set_auto_buf_reg(cmd);
2215 		if (ret)
2216 			return ret;
2217 	}
2218 
2219 	ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
2220 
2221 	/* now this cmd slot is owned by ublk driver */
2222 	io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
2223 	io->res = ub_cmd->result;
2224 
2225 	if (req_op(req) == REQ_OP_ZONE_APPEND)
2226 		req->__sector = ub_cmd->zone_append_lba;
2227 
2228 	if (unlikely(blk_should_fake_timeout(req->q)))
2229 		return 0;
2230 
2231 	if (ublk_need_req_ref(ubq))
2232 		ublk_sub_req_ref(io, req);
2233 	else
2234 		__ublk_complete_rq(req);
2235 	return 0;
2236 }
2237 
2238 static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
2239 			  struct request *req)
2240 {
2241 	/*
2242 	 * We have handled UBLK_IO_NEED_GET_DATA command,
2243 	 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
2244 	 * do the copy work.
2245 	 */
2246 	io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
2247 	/* update iod->addr because ublksrv may have passed a new io buffer */
2248 	ublk_get_iod(ubq, req->tag)->addr = io->addr;
2249 	pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
2250 			__func__, ubq->q_id, req->tag, io->flags,
2251 			ublk_get_iod(ubq, req->tag)->addr);
2252 
2253 	return ublk_start_io(ubq, req, io);
2254 }
2255 
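/*
 * Dispatch a per-io uring_cmd issued by the ublk server: validate opcode,
 * queue id, tag and issuing task, then handle FETCH, COMMIT_AND_FETCH,
 * NEED_GET_DATA and buffer (un)registration commands.
 */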
2256 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
2257 			       unsigned int issue_flags,
2258 			       const struct ublksrv_io_cmd *ub_cmd)
2259 {
2260 	struct ublk_device *ub = cmd->file->private_data;
2261 	struct ublk_queue *ubq;
2262 	struct ublk_io *io;
2263 	u32 cmd_op = cmd->cmd_op;
2264 	unsigned tag = ub_cmd->tag;
2265 	struct request *req;
2266 	int ret;
2267 
2268 	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
2269 			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
2270 			ub_cmd->result);
2271 
2272 	ret = ublk_check_cmd_op(cmd_op);
2273 	if (ret)
2274 		goto out;
2275 
2276 	/*
2277 	 * io_buffer_unregister_bvec() doesn't access the ubq or io,
2278 	 * so no need to validate the q_id, tag, or task
2279 	 */
2280 	if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
2281 		return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr,
2282 					      issue_flags);
2283 
2284 	ret = -EINVAL;
2285 	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
2286 		goto out;
2287 
2288 	ubq = ublk_get_queue(ub, ub_cmd->q_id);
2289 
2290 	if (tag >= ubq->q_depth)
2291 		goto out;
2292 
2293 	io = &ubq->ios[tag];
2294 	/* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
2295 	if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
2296 		ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
2297 		if (ret)
2298 			goto out;
2299 
2300 		ublk_prep_cancel(cmd, issue_flags, ubq, tag);
2301 		return -EIOCBQUEUED;
2302 	}
2303 
2304 	if (READ_ONCE(io->task) != current) {
2305 		/*
2306 		 * ublk_register_io_buf() accesses only the io's refcount,
2307 		 * so it can be handled on any task
2308 		 */
2309 		if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
2310 			return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr,
2311 						    issue_flags);
2312 
2313 		goto out;
2314 	}
2315 
2316 	/* there is a pending io cmd, something must be wrong */
2317 	if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) {
2318 		ret = -EBUSY;
2319 		goto out;
2320 	}
2321 
2322 	/*
2323 	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
2324 	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
2325 	 */
2326 	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
2327 			^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
2328 		goto out;
2329 
2330 	switch (_IOC_NR(cmd_op)) {
2331 	case UBLK_IO_REGISTER_IO_BUF:
2332 		return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr,
2333 						   issue_flags);
2334 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
2335 		ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
2336 		if (ret)
2337 			goto out;
2338 		break;
2339 	case UBLK_IO_NEED_GET_DATA:
2340 		/*
2341 		 * ublk_get_data() may fail and fall back to requeue, so keep
2342 		 * the uring_cmd active first and prepare for handling the
2343 		 * requeued request
2344 		 */
2345 		req = io->req;
2346 		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
2347 		io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
2348 		if (likely(ublk_get_data(ubq, io, req))) {
2349 			__ublk_prep_compl_io_cmd(io, req);
2350 			return UBLK_IO_RES_OK;
2351 		}
2352 		break;
2353 	default:
2354 		goto out;
2355 	}
2356 	ublk_prep_cancel(cmd, issue_flags, ubq, tag);
2357 	return -EIOCBQUEUED;
2358 
2359  out:
2360 	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
2361 			__func__, cmd_op, tag, ret, io->flags);
2362 	return ret;
2363 }
2364 
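/*
 * Look up the request backing this io by tag and take a reference on it,
 * validating that the request is started, carries data and that @offset
 * lies within it.
 */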
2365 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
2366 		const struct ublk_queue *ubq, struct ublk_io *io, size_t offset)
2367 {
2368 	unsigned tag = io - ubq->ios;
2369 	struct request *req;
2370 
2371 	/*
2372 	 * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
2373 	 * which would overwrite it with io->cmd
2374 	 */
2375 	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
2376 	if (!req)
2377 		return NULL;
2378 
2379 	if (!ublk_get_req_ref(io))
2380 		return NULL;
2381 
2382 	if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
2383 		goto fail_put;
2384 
2385 	if (!ublk_rq_has_data(req))
2386 		goto fail_put;
2387 
2388 	if (offset > blk_rq_bytes(req))
2389 		goto fail_put;
2390 
2391 	return req;
2392 fail_put:
2393 	ublk_put_req_ref(io, req);
2394 	return NULL;
2395 }
2396 
2397 static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
2398 		unsigned int issue_flags)
2399 {
2400 	/*
2401 	 * Not necessary for async retry, but let's keep it simple and always
2402 	 * copy the values to avoid any potential reuse.
2403 	 */
2404 	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
2405 	const struct ublksrv_io_cmd ub_cmd = {
2406 		.q_id = READ_ONCE(ub_src->q_id),
2407 		.tag = READ_ONCE(ub_src->tag),
2408 		.result = READ_ONCE(ub_src->result),
2409 		.addr = READ_ONCE(ub_src->addr)
2410 	};
2411 
2412 	WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
2413 
2414 	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
2415 }
2416 
2417 static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
2418 		unsigned int issue_flags)
2419 {
2420 	int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
2421 
2422 	if (ret != -EIOCBQUEUED)
2423 		io_uring_cmd_done(cmd, ret, 0, issue_flags);
2424 }
2425 
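/*
 * ->uring_cmd() for /dev/ublkcN: route cancellation to the cancel handler,
 * bounce unlocked issue into task work, otherwise handle the command inline.
 */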
2426 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
2427 {
2428 	if (unlikely(issue_flags & IO_URING_F_CANCEL)) {
2429 		ublk_uring_cmd_cancel_fn(cmd, issue_flags);
2430 		return 0;
2431 	}
2432 
2433 	/* a well-implemented server won't run into the unlocked path */
2434 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
2435 		io_uring_cmd_complete_in_task(cmd, ublk_ch_uring_cmd_cb);
2436 		return -EIOCBQUEUED;
2437 	}
2438 
2439 	return ublk_ch_uring_cmd_local(cmd, issue_flags);
2440 }
2441 
2442 static inline bool ublk_check_ubuf_dir(const struct request *req,
2443 		int ubuf_dir)
2444 {
2445 	/* copy ubuf to request pages */
2446 	if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
2447 	    ubuf_dir == ITER_SOURCE)
2448 		return true;
2449 
2450 	/* copy request pages to ubuf */
2451 	if ((req_op(req) == REQ_OP_WRITE ||
2452 	     req_op(req) == REQ_OP_ZONE_APPEND) &&
2453 	    ubuf_dir == ITER_DEST)
2454 		return true;
2455 
2456 	return false;
2457 }
2458 
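/*
 * For UBLK_F_USER_COPY pread()/pwrite() on the char device: the file offset
 * encodes queue id, tag and buffer offset (see ublk_pos_to_*()), which are
 * used to look up the request and take a reference on it.
 */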
2459 static struct request *ublk_check_and_get_req(struct kiocb *iocb,
2460 		struct iov_iter *iter, size_t *off, int dir,
2461 		struct ublk_io **io)
2462 {
2463 	struct ublk_device *ub = iocb->ki_filp->private_data;
2464 	struct ublk_queue *ubq;
2465 	struct request *req;
2466 	size_t buf_off;
2467 	u16 tag, q_id;
2468 
2469 	if (!ub)
2470 		return ERR_PTR(-EACCES);
2471 
2472 	if (!user_backed_iter(iter))
2473 		return ERR_PTR(-EACCES);
2474 
2475 	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
2476 		return ERR_PTR(-EACCES);
2477 
2478 	tag = ublk_pos_to_tag(iocb->ki_pos);
2479 	q_id = ublk_pos_to_hwq(iocb->ki_pos);
2480 	buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
2481 
2482 	if (q_id >= ub->dev_info.nr_hw_queues)
2483 		return ERR_PTR(-EINVAL);
2484 
2485 	ubq = ublk_get_queue(ub, q_id);
2486 	if (!ubq)
2487 		return ERR_PTR(-EINVAL);
2488 
2489 	if (!ublk_support_user_copy(ubq))
2490 		return ERR_PTR(-EACCES);
2491 
2492 	if (tag >= ubq->q_depth)
2493 		return ERR_PTR(-EINVAL);
2494 
2495 	*io = &ubq->ios[tag];
2496 	req = __ublk_check_and_get_req(ub, ubq, *io, buf_off);
2497 	if (!req)
2498 		return ERR_PTR(-EINVAL);
2499 
2500 	if (!req->mq_hctx || !req->mq_hctx->driver_data)
2501 		goto fail;
2502 
2503 	if (!ublk_check_ubuf_dir(req, dir))
2504 		goto fail;
2505 
2506 	*off = buf_off;
2507 	return req;
2508 fail:
2509 	ublk_put_req_ref(*io, req);
2510 	return ERR_PTR(-EACCES);
2511 }
2512 
2513 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
2514 {
2515 	struct request *req;
2516 	struct ublk_io *io;
2517 	size_t buf_off;
2518 	size_t ret;
2519 
2520 	req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
2521 	if (IS_ERR(req))
2522 		return PTR_ERR(req);
2523 
2524 	ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
2525 	ublk_put_req_ref(io, req);
2526 
2527 	return ret;
2528 }
2529 
2530 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
2531 {
2532 	struct request *req;
2533 	struct ublk_io *io;
2534 	size_t buf_off;
2535 	size_t ret;
2536 
2537 	req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
2538 	if (IS_ERR(req))
2539 		return PTR_ERR(req);
2540 
2541 	ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
2542 	ublk_put_req_ref(io, req);
2543 
2544 	return ret;
2545 }
2546 
2547 static const struct file_operations ublk_ch_fops = {
2548 	.owner = THIS_MODULE,
2549 	.open = ublk_ch_open,
2550 	.release = ublk_ch_release,
2551 	.read_iter = ublk_ch_read_iter,
2552 	.write_iter = ublk_ch_write_iter,
2553 	.uring_cmd = ublk_ch_uring_cmd,
2554 	.mmap = ublk_ch_mmap,
2555 };
2556 
2557 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
2558 {
2559 	int size = ublk_queue_cmd_buf_size(ub, q_id);
2560 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2561 	int i;
2562 
2563 	for (i = 0; i < ubq->q_depth; i++) {
2564 		struct ublk_io *io = &ubq->ios[i];
2565 		if (io->task)
2566 			put_task_struct(io->task);
2567 		WARN_ON_ONCE(refcount_read(&io->ref));
2568 		WARN_ON_ONCE(io->task_registered_buffers);
2569 	}
2570 
2571 	if (ubq->io_cmd_buf)
2572 		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
2573 }
2574 
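/*
 * Allocate the per-queue IO command descriptor buffer (io_cmd_buf), which
 * the ublk server typically maps via mmap() on the char device in order to
 * read request descriptors.
 */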
2575 static int ublk_init_queue(struct ublk_device *ub, int q_id)
2576 {
2577 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
2578 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
2579 	void *ptr;
2580 	int size;
2581 
2582 	spin_lock_init(&ubq->cancel_lock);
2583 	ubq->flags = ub->dev_info.flags;
2584 	ubq->q_id = q_id;
2585 	ubq->q_depth = ub->dev_info.queue_depth;
2586 	size = ublk_queue_cmd_buf_size(ub, q_id);
2587 
2588 	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
2589 	if (!ptr)
2590 		return -ENOMEM;
2591 
2592 	ubq->io_cmd_buf = ptr;
2593 	ubq->dev = ub;
2594 	return 0;
2595 }
2596 
2597 static void ublk_deinit_queues(struct ublk_device *ub)
2598 {
2599 	int nr_queues = ub->dev_info.nr_hw_queues;
2600 	int i;
2601 
2602 	if (!ub->__queues)
2603 		return;
2604 
2605 	for (i = 0; i < nr_queues; i++)
2606 		ublk_deinit_queue(ub, i);
2607 	kvfree(ub->__queues);
2608 }
2609 
2610 static int ublk_init_queues(struct ublk_device *ub)
2611 {
2612 	int nr_queues = ub->dev_info.nr_hw_queues;
2613 	int depth = ub->dev_info.queue_depth;
2614 	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
2615 	int i, ret = -ENOMEM;
2616 
2617 	ub->queue_size = ubq_size;
2618 	ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
2619 	if (!ub->__queues)
2620 		return ret;
2621 
2622 	for (i = 0; i < nr_queues; i++) {
2623 		if (ublk_init_queue(ub, i))
2624 			goto fail;
2625 	}
2626 
2627 	init_completion(&ub->completion);
2628 	return 0;
2629 
2630  fail:
2631 	ublk_deinit_queues(ub);
2632 	return ret;
2633 }
2634 
2635 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2636 {
2637 	int i = idx;
2638 	int err;
2639 
2640 	spin_lock(&ublk_idr_lock);
2641 	/* allocate id; if @idx >= 0, we're requesting that specific id */
2642 	if (i >= 0) {
2643 		err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2644 		if (err == -ENOSPC)
2645 			err = -EEXIST;
2646 	} else {
2647 		err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS,
2648 				GFP_NOWAIT);
2649 	}
2650 	spin_unlock(&ublk_idr_lock);
2651 
2652 	if (err >= 0)
2653 		ub->ub_number = err;
2654 
2655 	return err;
2656 }
2657 
2658 static void ublk_free_dev_number(struct ublk_device *ub)
2659 {
2660 	spin_lock(&ublk_idr_lock);
2661 	idr_remove(&ublk_index_idr, ub->ub_number);
2662 	wake_up_all(&ublk_idr_wq);
2663 	spin_unlock(&ublk_idr_lock);
2664 }
2665 
2666 static void ublk_cdev_rel(struct device *dev)
2667 {
2668 	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2669 
2670 	blk_mq_free_tag_set(&ub->tag_set);
2671 	ublk_deinit_queues(ub);
2672 	ublk_free_dev_number(ub);
2673 	mutex_destroy(&ub->mutex);
2674 	mutex_destroy(&ub->cancel_mutex);
2675 	kfree(ub);
2676 }
2677 
2678 static int ublk_add_chdev(struct ublk_device *ub)
2679 {
2680 	struct device *dev = &ub->cdev_dev;
2681 	int minor = ub->ub_number;
2682 	int ret;
2683 
2684 	dev->parent = ublk_misc.this_device;
2685 	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
2686 	dev->class = &ublk_chr_class;
2687 	dev->release = ublk_cdev_rel;
2688 	device_initialize(dev);
2689 
2690 	ret = dev_set_name(dev, "ublkc%d", minor);
2691 	if (ret)
2692 		goto fail;
2693 
2694 	cdev_init(&ub->cdev, &ublk_ch_fops);
2695 	ret = cdev_device_add(&ub->cdev, dev);
2696 	if (ret)
2697 		goto fail;
2698 
2699 	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
2700 		unprivileged_ublks_added++;
2701 	return 0;
2702  fail:
2703 	put_device(dev);
2704 	return ret;
2705 }
2706 
2707 /* align max io buffer size with PAGE_SIZE */
2708 static void ublk_align_max_io_size(struct ublk_device *ub)
2709 {
2710 	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2711 
2712 	ub->dev_info.max_io_buf_bytes =
2713 		round_down(max_io_bytes, PAGE_SIZE);
2714 }
2715 
2716 static int ublk_add_tag_set(struct ublk_device *ub)
2717 {
2718 	ub->tag_set.ops = &ublk_mq_ops;
2719 	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2720 	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2721 	ub->tag_set.numa_node = NUMA_NO_NODE;
2722 	ub->tag_set.driver_data = ub;
2723 	return blk_mq_alloc_tag_set(&ub->tag_set);
2724 }
2725 
2726 static void ublk_remove(struct ublk_device *ub)
2727 {
2728 	bool unprivileged;
2729 
2730 	ublk_stop_dev(ub);
2731 	cdev_device_del(&ub->cdev, &ub->cdev_dev);
2732 	unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2733 	ublk_put_device(ub);
2734 
2735 	if (unprivileged)
2736 		unprivileged_ublks_added--;
2737 }
2738 
2739 static struct ublk_device *ublk_get_device_from_id(int idx)
2740 {
2741 	struct ublk_device *ub = NULL;
2742 
2743 	if (idx < 0)
2744 		return NULL;
2745 
2746 	spin_lock(&ublk_idr_lock);
2747 	ub = idr_find(&ublk_index_idr, idx);
2748 	if (ub)
2749 		ub = ublk_get_device(ub);
2750 	spin_unlock(&ublk_idr_lock);
2751 
2752 	return ub;
2753 }
2754 
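/*
 * UBLK_CMD_START_DEV: build queue limits from the validated parameters,
 * wait until every queue has completed its FETCH handshake, then allocate
 * and add the ublkbN disk.
 */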
2755 static int ublk_ctrl_start_dev(struct ublk_device *ub,
2756 		const struct ublksrv_ctrl_cmd *header)
2757 {
2758 	const struct ublk_param_basic *p = &ub->params.basic;
2759 	int ublksrv_pid = (int)header->data[0];
2760 	struct queue_limits lim = {
2761 		.logical_block_size	= 1 << p->logical_bs_shift,
2762 		.physical_block_size	= 1 << p->physical_bs_shift,
2763 		.io_min			= 1 << p->io_min_shift,
2764 		.io_opt			= 1 << p->io_opt_shift,
2765 		.max_hw_sectors		= p->max_sectors,
2766 		.chunk_sectors		= p->chunk_sectors,
2767 		.virt_boundary_mask	= p->virt_boundary_mask,
2768 		.max_segments		= USHRT_MAX,
2769 		.max_segment_size	= UINT_MAX,
2770 		.dma_alignment		= 3,
2771 	};
2772 	struct gendisk *disk;
2773 	int ret = -EINVAL;
2774 
2775 	if (ublksrv_pid <= 0)
2776 		return -EINVAL;
2777 	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
2778 		return -EINVAL;
2779 
2780 	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
2781 		const struct ublk_param_discard *pd = &ub->params.discard;
2782 
2783 		lim.discard_alignment = pd->discard_alignment;
2784 		lim.discard_granularity = pd->discard_granularity;
2785 		lim.max_hw_discard_sectors = pd->max_discard_sectors;
2786 		lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
2787 		lim.max_discard_segments = pd->max_discard_segments;
2788 	}
2789 
2790 	if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
2791 		const struct ublk_param_zoned *p = &ub->params.zoned;
2792 
2793 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
2794 			return -EOPNOTSUPP;
2795 
2796 		lim.features |= BLK_FEAT_ZONED;
2797 		lim.max_active_zones = p->max_active_zones;
2798 		lim.max_open_zones =  p->max_open_zones;
2799 		lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
2800 	}
2801 
2802 	if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
2803 		lim.features |= BLK_FEAT_WRITE_CACHE;
2804 		if (ub->params.basic.attrs & UBLK_ATTR_FUA)
2805 			lim.features |= BLK_FEAT_FUA;
2806 	}
2807 
2808 	if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
2809 		lim.features |= BLK_FEAT_ROTATIONAL;
2810 
2811 	if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
2812 		lim.dma_alignment = ub->params.dma.alignment;
2813 
2814 	if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
2815 		lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
2816 		lim.max_segment_size = ub->params.seg.max_segment_size;
2817 		lim.max_segments = ub->params.seg.max_segments;
2818 	}
2819 
2820 	if (wait_for_completion_interruptible(&ub->completion) != 0)
2821 		return -EINTR;
2822 
2823 	mutex_lock(&ub->mutex);
2824 	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2825 	    test_bit(UB_STATE_USED, &ub->state)) {
2826 		ret = -EEXIST;
2827 		goto out_unlock;
2828 	}
2829 
2830 	disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
2831 	if (IS_ERR(disk)) {
2832 		ret = PTR_ERR(disk);
2833 		goto out_unlock;
2834 	}
2835 	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2836 	disk->fops = &ub_fops;
2837 	disk->private_data = ub;
2838 
2839 	ub->dev_info.ublksrv_pid = ublksrv_pid;
2840 	ub->ub_disk = disk;
2841 
2842 	ublk_apply_params(ub);
2843 
2844 	/* don't probe partitions if any ubq daemon is untrusted */
2845 	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2846 		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2847 
2848 	ublk_get_device(ub);
2849 	ub->dev_info.state = UBLK_S_DEV_LIVE;
2850 
2851 	if (ublk_dev_is_zoned(ub)) {
2852 		ret = ublk_revalidate_disk_zones(ub);
2853 		if (ret)
2854 			goto out_put_cdev;
2855 	}
2856 
2857 	ret = add_disk(disk);
2858 	if (ret)
2859 		goto out_put_cdev;
2860 
2861 	set_bit(UB_STATE_USED, &ub->state);
2862 
2863 out_put_cdev:
2864 	if (ret) {
2865 		ublk_detach_disk(ub);
2866 		ublk_put_device(ub);
2867 	}
2868 	if (ret)
2869 		put_disk(disk);
2870 out_unlock:
2871 	mutex_unlock(&ub->mutex);
2872 	return ret;
2873 }
2874 
2875 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2876 		const struct ublksrv_ctrl_cmd *header)
2877 {
2878 	void __user *argp = (void __user *)(unsigned long)header->addr;
2879 	cpumask_var_t cpumask;
2880 	unsigned long queue;
2881 	unsigned int retlen;
2882 	unsigned int i;
2883 	int ret;
2884 
2885 	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
2886 		return -EINVAL;
2887 	if (header->len & (sizeof(unsigned long)-1))
2888 		return -EINVAL;
2889 	if (!header->addr)
2890 		return -EINVAL;
2891 
2892 	queue = header->data[0];
2893 	if (queue >= ub->dev_info.nr_hw_queues)
2894 		return -EINVAL;
2895 
2896 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
2897 		return -ENOMEM;
2898 
2899 	for_each_possible_cpu(i) {
2900 		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2901 			cpumask_set_cpu(i, cpumask);
2902 	}
2903 
2904 	ret = -EFAULT;
2905 	retlen = min_t(unsigned short, header->len, cpumask_size());
2906 	if (copy_to_user(argp, cpumask, retlen))
2907 		goto out_free_cpumask;
2908 	if (retlen != header->len &&
2909 	    clear_user(argp + retlen, header->len - retlen))
2910 		goto out_free_cpumask;
2911 
2912 	ret = 0;
2913 out_free_cpumask:
2914 	free_cpumask_var(cpumask);
2915 	return ret;
2916 }
2917 
2918 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
2919 {
2920 	pr_devel("%s: dev id %d flags %llx\n", __func__,
2921 			info->dev_id, info->flags);
2922 	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2923 			info->nr_hw_queues, info->queue_depth);
2924 }
2925 
2926 static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
2927 {
2928 	void __user *argp = (void __user *)(unsigned long)header->addr;
2929 	struct ublksrv_ctrl_dev_info info;
2930 	struct ublk_device *ub;
2931 	int ret = -EINVAL;
2932 
2933 	if (header->len < sizeof(info) || !header->addr)
2934 		return -EINVAL;
2935 	if (header->queue_id != (u16)-1) {
2936 		pr_warn("%s: queue_id is wrong %x\n",
2937 			__func__, header->queue_id);
2938 		return -EINVAL;
2939 	}
2940 
2941 	if (copy_from_user(&info, argp, sizeof(info)))
2942 		return -EFAULT;
2943 
2944 	if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
2945 	    info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
2946 		return -EINVAL;
2947 
2948 	if (capable(CAP_SYS_ADMIN))
2949 		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
2950 	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
2951 		return -EPERM;
2952 
2953 	/* forbid nonsense combinations of recovery flags */
2954 	switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
2955 	case 0:
2956 	case UBLK_F_USER_RECOVERY:
2957 	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
2958 	case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
2959 		break;
2960 	default:
2961 		pr_warn("%s: invalid recovery flags %llx\n", __func__,
2962 			info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
2963 		return -EINVAL;
2964 	}
2965 
2966 	if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
2967 		pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
2968 		return -EINVAL;
2969 	}
2970 
2971 	/*
2972 	 * An unprivileged device can't be trusted, but RECOVERY and
2973 	 * RECOVERY_REISSUE may still hang error handling, so recovery
2974 	 * features can't be supported for unprivileged ublk now
2975 	 *
2976 	 * TODO: provide forward progress for RECOVERY handler, so that
2977 	 * unprivileged device can benefit from it
2978 	 */
2979 	if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
2980 		info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
2981 				UBLK_F_USER_RECOVERY);
2982 
2983 		/*
2984 		 * For USER_COPY, we depend on userspace to fill the request
2985 		 * buffer by pwrite() to the ublk char device, which can't be
2986 		 * used for an unprivileged device
2987 		 *
2988 		 * Same with zero copy or auto buffer register.
2989 		 */
2990 		if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
2991 					UBLK_F_AUTO_BUF_REG))
2992 			return -EINVAL;
2993 	}
2994 
2995 	/* the created device is always owned by current user */
2996 	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
2997 
2998 	if (header->dev_id != info.dev_id) {
2999 		pr_warn("%s: dev id not match %u %u\n",
3000 			__func__, header->dev_id, info.dev_id);
3001 		return -EINVAL;
3002 	}
3003 
3004 	if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
3005 		pr_warn("%s: dev id is too large. Max supported is %d\n",
3006 			__func__, UBLK_MAX_UBLKS - 1);
3007 		return -EINVAL;
3008 	}
3009 
3010 	ublk_dump_dev_info(&info);
3011 
3012 	ret = mutex_lock_killable(&ublk_ctl_mutex);
3013 	if (ret)
3014 		return ret;
3015 
3016 	ret = -EACCES;
3017 	if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
3018 	    unprivileged_ublks_added >= unprivileged_ublks_max)
3019 		goto out_unlock;
3020 
3021 	ret = -ENOMEM;
3022 	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
3023 	if (!ub)
3024 		goto out_unlock;
3025 	mutex_init(&ub->mutex);
3026 	spin_lock_init(&ub->lock);
3027 	mutex_init(&ub->cancel_mutex);
3028 
3029 	ret = ublk_alloc_dev_number(ub, header->dev_id);
3030 	if (ret < 0)
3031 		goto out_free_ub;
3032 
3033 	memcpy(&ub->dev_info, &info, sizeof(info));
3034 
3035 	/* update device id */
3036 	ub->dev_info.dev_id = ub->ub_number;
3037 
3038 	/*
3039 	 * The 64-bit flags will be copied back to userspace as the feature
3040 	 * negotiation result, so we have to clear flags which the driver
3041 	 * doesn't support yet; then userspace can get the correct flags
3042 	 * (features) to handle.
3043 	 */
3044 	ub->dev_info.flags &= UBLK_F_ALL;
3045 
3046 	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
3047 		UBLK_F_URING_CMD_COMP_IN_TASK |
3048 		UBLK_F_PER_IO_DAEMON |
3049 		UBLK_F_BUF_REG_OFF_DAEMON;
3050 
3051 	/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
3052 	if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
3053 				UBLK_F_AUTO_BUF_REG))
3054 		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
3055 
3056 	/*
3057 	 * Zoned storage support requires reusing `ublksrv_io_cmd->addr` for
3058 	 * returning write_append_lba, which is only allowed in case of
3059 	 * user copy or zero copy
3060 	 */
3061 	if (ublk_dev_is_zoned(ub) &&
3062 	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
3063 	     (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
3064 		ret = -EINVAL;
3065 		goto out_free_dev_number;
3066 	}
3067 
3068 	ub->dev_info.nr_hw_queues = min_t(unsigned int,
3069 			ub->dev_info.nr_hw_queues, nr_cpu_ids);
3070 	ublk_align_max_io_size(ub);
3071 
3072 	ret = ublk_init_queues(ub);
3073 	if (ret)
3074 		goto out_free_dev_number;
3075 
3076 	ret = ublk_add_tag_set(ub);
3077 	if (ret)
3078 		goto out_deinit_queues;
3079 
3080 	ret = -EFAULT;
3081 	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
3082 		goto out_free_tag_set;
3083 
3084 	/*
3085 	 * Add the char dev so that the ublksrv daemon can be set up.
3086 	 * ublk_add_chdev() will clean up everything if it fails.
3087 	 */
3088 	ret = ublk_add_chdev(ub);
3089 	goto out_unlock;
3090 
3091 out_free_tag_set:
3092 	blk_mq_free_tag_set(&ub->tag_set);
3093 out_deinit_queues:
3094 	ublk_deinit_queues(ub);
3095 out_free_dev_number:
3096 	ublk_free_dev_number(ub);
3097 out_free_ub:
3098 	mutex_destroy(&ub->mutex);
3099 	mutex_destroy(&ub->cancel_mutex);
3100 	kfree(ub);
3101 out_unlock:
3102 	mutex_unlock(&ublk_ctl_mutex);
3103 	return ret;
3104 }
3105 
3106 static inline bool ublk_idr_freed(int id)
3107 {
3108 	void *ptr;
3109 
3110 	spin_lock(&ublk_idr_lock);
3111 	ptr = idr_find(&ublk_index_idr, id);
3112 	spin_unlock(&ublk_idr_lock);
3113 
3114 	return ptr == NULL;
3115 }
3116 
3117 static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
3118 {
3119 	struct ublk_device *ub = *p_ub;
3120 	int idx = ub->ub_number;
3121 	int ret;
3122 
3123 	ret = mutex_lock_killable(&ublk_ctl_mutex);
3124 	if (ret)
3125 		return ret;
3126 
3127 	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
3128 		ublk_remove(ub);
3129 		set_bit(UB_STATE_DELETED, &ub->state);
3130 	}
3131 
3132 	/* Mark the reference as consumed */
3133 	*p_ub = NULL;
3134 	ublk_put_device(ub);
3135 	mutex_unlock(&ublk_ctl_mutex);
3136 
3137 	/*
3138 	 * Wait until the idr entry is removed, so the id can be reused after
3139 	 * the DEL_DEV command returns.
3140 	 *
3141 	 * If we return because of a user interrupt, a future delete command
3142 	 * may come:
3143 	 *
3144 	 * - the device number isn't freed, this device won't or needn't
3145 	 *   be deleted again, since UB_STATE_DELETED is set, and device
3146 	 *   will be released after the last reference is dropped
3147 	 *
3148 	 * - the device number is freed already, we will not find this
3149 	 *   device via ublk_get_device_from_id()
3150 	 */
3151 	if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
3152 		return -EINTR;
3153 	return 0;
3154 }
3155 
3156 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
3157 {
3158 	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
3159 
3160 	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
3161 			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
3162 			header->data[0], header->addr, header->len);
3163 }
3164 
3165 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
3166 {
3167 	ublk_stop_dev(ub);
3168 	return 0;
3169 }
3170 
3171 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
3172 		const struct ublksrv_ctrl_cmd *header)
3173 {
3174 	void __user *argp = (void __user *)(unsigned long)header->addr;
3175 
3176 	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
3177 		return -EINVAL;
3178 
3179 	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
3180 		return -EFAULT;
3181 
3182 	return 0;
3183 }
3184 
3185 /* TYPE_DEVT is readonly, so fill it up before returning to userspace */
3186 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
3187 {
3188 	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
3189 	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
3190 
3191 	if (ub->ub_disk) {
3192 		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
3193 		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
3194 	} else {
3195 		ub->params.devt.disk_major = 0;
3196 		ub->params.devt.disk_minor = 0;
3197 	}
3198 	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
3199 }
3200 
3201 static int ublk_ctrl_get_params(struct ublk_device *ub,
3202 		const struct ublksrv_ctrl_cmd *header)
3203 {
3204 	void __user *argp = (void __user *)(unsigned long)header->addr;
3205 	struct ublk_params_header ph;
3206 	int ret;
3207 
3208 	if (header->len <= sizeof(ph) || !header->addr)
3209 		return -EINVAL;
3210 
3211 	if (copy_from_user(&ph, argp, sizeof(ph)))
3212 		return -EFAULT;
3213 
3214 	if (ph.len > header->len || !ph.len)
3215 		return -EINVAL;
3216 
3217 	if (ph.len > sizeof(struct ublk_params))
3218 		ph.len = sizeof(struct ublk_params);
3219 
3220 	mutex_lock(&ub->mutex);
3221 	ublk_ctrl_fill_params_devt(ub);
3222 	if (copy_to_user(argp, &ub->params, ph.len))
3223 		ret = -EFAULT;
3224 	else
3225 		ret = 0;
3226 	mutex_unlock(&ub->mutex);
3227 
3228 	return ret;
3229 }
3230 
3231 static int ublk_ctrl_set_params(struct ublk_device *ub,
3232 		const struct ublksrv_ctrl_cmd *header)
3233 {
3234 	void __user *argp = (void __user *)(unsigned long)header->addr;
3235 	struct ublk_params_header ph;
3236 	int ret = -EFAULT;
3237 
3238 	if (header->len <= sizeof(ph) || !header->addr)
3239 		return -EINVAL;
3240 
3241 	if (copy_from_user(&ph, argp, sizeof(ph)))
3242 		return -EFAULT;
3243 
3244 	if (ph.len > header->len || !ph.len || !ph.types)
3245 		return -EINVAL;
3246 
3247 	if (ph.len > sizeof(struct ublk_params))
3248 		ph.len = sizeof(struct ublk_params);
3249 
3250 	mutex_lock(&ub->mutex);
3251 	if (test_bit(UB_STATE_USED, &ub->state)) {
3252 		/*
3253 		 * Parameters can only be changed when the device hasn't
3254 		 * been started yet
3255 		 */
3256 		ret = -EACCES;
3257 	} else if (copy_from_user(&ub->params, argp, ph.len)) {
3258 		ret = -EFAULT;
3259 	} else {
3260 		/* clear all we don't support yet */
3261 		ub->params.types &= UBLK_PARAM_TYPE_ALL;
3262 		ret = ublk_validate_params(ub);
3263 		if (ret)
3264 			ub->params.types = 0;
3265 	}
3266 	mutex_unlock(&ub->mutex);
3267 
3268 	return ret;
3269 }
3270 
3271 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
3272 		const struct ublksrv_ctrl_cmd *header)
3273 {
3274 	int ret = -EINVAL;
3275 
3276 	mutex_lock(&ub->mutex);
3277 	if (ublk_nosrv_should_stop_dev(ub))
3278 		goto out_unlock;
3279 	/*
3280 	 * START_RECOVERY is only allowed after:
3281 	 *
3282 	 * (1) UB_STATE_OPEN is not set, which means the dying process has exited
3283 	 *     and the related io_uring ctx is freed, so the file struct of
3284 	 *     /dev/ublkcX is released.
3285 	 *
3286 	 * and one of the following holds:
3287 	 *
3288 	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
3289 	 *     (a) has quiesced the request queue
3290 	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
3291 	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
3292 	 *     (d) has completed/canceled all ioucmds owned by the dying process
3293 	 *
3294 	 * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
3295 	 *     quiesced, but all I/O is being immediately errored
3296 	 */
3297 	if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
3298 		ret = -EBUSY;
3299 		goto out_unlock;
3300 	}
3301 	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
3302 	init_completion(&ub->completion);
3303 	ret = 0;
3304  out_unlock:
3305 	mutex_unlock(&ub->mutex);
3306 	return ret;
3307 }
3308 
3309 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
3310 		const struct ublksrv_ctrl_cmd *header)
3311 {
3312 	int ublksrv_pid = (int)header->data[0];
3313 	int ret = -EINVAL;
3314 
3315 	pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__,
3316 		 header->dev_id);
3317 
3318 	if (wait_for_completion_interruptible(&ub->completion))
3319 		return -EINTR;
3320 
3321 	pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
3322 		 header->dev_id);
3323 
3324 	mutex_lock(&ub->mutex);
3325 	if (ublk_nosrv_should_stop_dev(ub))
3326 		goto out_unlock;
3327 
3328 	if (!ublk_dev_in_recoverable_state(ub)) {
3329 		ret = -EBUSY;
3330 		goto out_unlock;
3331 	}
3332 	ub->dev_info.ublksrv_pid = ublksrv_pid;
3333 	ub->dev_info.state = UBLK_S_DEV_LIVE;
3334 	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
3335 			__func__, ublksrv_pid, header->dev_id);
3336 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
3337 	ret = 0;
3338  out_unlock:
3339 	mutex_unlock(&ub->mutex);
3340 	return ret;
3341 }
3342 
3343 static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
3344 {
3345 	void __user *argp = (void __user *)(unsigned long)header->addr;
3346 	u64 features = UBLK_F_ALL;
3347 
3348 	if (header->len != UBLK_FEATURES_LEN || !header->addr)
3349 		return -EINVAL;
3350 
3351 	if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
3352 		return -EFAULT;
3353 
3354 	return 0;
3355 }
3356 
3357 static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
3358 {
3359 	struct ublk_param_basic *p = &ub->params.basic;
3360 	u64 new_size = header->data[0];
3361 
3362 	mutex_lock(&ub->mutex);
3363 	p->dev_sectors = new_size;
3364 	set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
3365 	mutex_unlock(&ub->mutex);
3366 }
3367 
3368 struct count_busy {
3369 	const struct ublk_queue *ubq;
3370 	unsigned int nr_busy;
3371 };
3372 
3373 static bool ublk_count_busy_req(struct request *rq, void *data)
3374 {
3375 	struct count_busy *idle = data;
3376 
3377 	if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
3378 		idle->nr_busy += 1;
3379 	return true;
3380 }
3381 
3382 /* uring_cmd is guaranteed to be active if the associated request is idle */
3383 static bool ubq_has_idle_io(const struct ublk_queue *ubq)
3384 {
3385 	struct count_busy data = {
3386 		.ubq = ubq,
3387 	};
3388 
3389 	blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
3390 	return data.nr_busy < ubq->q_depth;
3391 }
3392 
3393 /* Wait until each hw queue has at least one idle IO */
3394 static int ublk_wait_for_idle_io(struct ublk_device *ub,
3395 				 unsigned int timeout_ms)
3396 {
3397 	unsigned int elapsed = 0;
3398 	int ret;
3399 
3400 	while (elapsed < timeout_ms && !signal_pending(current)) {
3401 		unsigned int queues_cancelable = 0;
3402 		int i;
3403 
3404 		for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
3405 			struct ublk_queue *ubq = ublk_get_queue(ub, i);
3406 
3407 			queues_cancelable += !!ubq_has_idle_io(ubq);
3408 		}
3409 
3410 		/*
3411 		 * Each queue needs at least one active command for
3412 		 * notifying the ublk server
3413 		 */
3414 		if (queues_cancelable == ub->dev_info.nr_hw_queues)
3415 			break;
3416 
3417 		msleep(UBLK_REQUEUE_DELAY_MS);
3418 		elapsed += UBLK_REQUEUE_DELAY_MS;
3419 	}
3420 
3421 	if (signal_pending(current))
3422 		ret = -EINTR;
3423 	else if (elapsed >= timeout_ms)
3424 		ret = -EBUSY;
3425 	else
3426 		ret = 0;
3427 
3428 	return ret;
3429 }
3430 
3431 static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
3432 				 const struct ublksrv_ctrl_cmd *header)
3433 {
3434 	/* zero means wait forever */
3435 	u64 timeout_ms = header->data[0];
3436 	struct gendisk *disk;
3437 	int ret = -ENODEV;
3438 
3439 	if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
3440 		return -EOPNOTSUPP;
3441 
3442 	mutex_lock(&ub->mutex);
3443 	disk = ublk_get_disk(ub);
3444 	if (!disk)
3445 		goto unlock;
3446 	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
3447 		goto put_disk;
3448 
3449 	ret = 0;
3450 	/* already in expected state */
3451 	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
3452 		goto put_disk;
3453 
3454 	/* Mark the device as canceling */
3455 	mutex_lock(&ub->cancel_mutex);
3456 	blk_mq_quiesce_queue(disk->queue);
3457 	ublk_set_canceling(ub, true);
3458 	blk_mq_unquiesce_queue(disk->queue);
3459 	mutex_unlock(&ub->cancel_mutex);
3460 
3461 	if (!timeout_ms)
3462 		timeout_ms = UINT_MAX;
3463 	ret = ublk_wait_for_idle_io(ub, timeout_ms);
3464 
3465 put_disk:
3466 	ublk_put_disk(disk);
3467 unlock:
3468 	mutex_unlock(&ub->mutex);
3469 
3470 	/* Cancel pending uring_cmd */
3471 	if (!ret)
3472 		ublk_cancel_dev(ub);
3473 	return ret;
3474 }
3475 
3476 /*
3477  * All control commands are sent via /dev/ublk-control, so we have to check
3478  * the destination device's permission
3479  */
3480 static int ublk_char_dev_permission(struct ublk_device *ub,
3481 		const char *dev_path, int mask)
3482 {
3483 	int err;
3484 	struct path path;
3485 	struct kstat stat;
3486 
3487 	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
3488 	if (err)
3489 		return err;
3490 
3491 	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
3492 	if (err)
3493 		goto exit;
3494 
3495 	err = -EPERM;
3496 	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
3497 		goto exit;
3498 
3499 	err = inode_permission(&nop_mnt_idmap,
3500 			d_backing_inode(path.dentry), mask);
3501 exit:
3502 	path_put(&path);
3503 	return err;
3504 }
3505 
3506 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
3507 		struct io_uring_cmd *cmd)
3508 {
3509 	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
3510 	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
3511 	void __user *argp = (void __user *)(unsigned long)header->addr;
3512 	char *dev_path = NULL;
3513 	int ret = 0;
3514 	int mask;
3515 
3516 	if (!unprivileged) {
3517 		if (!capable(CAP_SYS_ADMIN))
3518 			return -EPERM;
3519 		/*
3520 		 * The newly added command UBLK_CMD_GET_DEV_INFO2 includes
3521 		 * char_dev_path in its payload too, since userspace may not
3522 		 * know if the specified device was created in unprivileged
3523 		 * mode.
3524 		 */
3525 		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
3526 			return 0;
3527 	}
3528 
3529 	/*
3530 	 * User has to provide the char device path for unprivileged ublk
3531 	 *
3532 	 * header->addr always points to the dev path buffer, and
3533 	 * header->dev_path_len records the length of the dev path buffer.
3534 	 */
3535 	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
3536 		return -EINVAL;
3537 
3538 	if (header->len < header->dev_path_len)
3539 		return -EINVAL;
3540 
3541 	dev_path = memdup_user_nul(argp, header->dev_path_len);
3542 	if (IS_ERR(dev_path))
3543 		return PTR_ERR(dev_path);
3544 
3545 	ret = -EINVAL;
3546 	switch (_IOC_NR(cmd->cmd_op)) {
3547 	case UBLK_CMD_GET_DEV_INFO:
3548 	case UBLK_CMD_GET_DEV_INFO2:
3549 	case UBLK_CMD_GET_QUEUE_AFFINITY:
3550 	case UBLK_CMD_GET_PARAMS:
3551 	case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
3552 		mask = MAY_READ;
3553 		break;
3554 	case UBLK_CMD_START_DEV:
3555 	case UBLK_CMD_STOP_DEV:
3556 	case UBLK_CMD_ADD_DEV:
3557 	case UBLK_CMD_DEL_DEV:
3558 	case UBLK_CMD_SET_PARAMS:
3559 	case UBLK_CMD_START_USER_RECOVERY:
3560 	case UBLK_CMD_END_USER_RECOVERY:
3561 	case UBLK_CMD_UPDATE_SIZE:
3562 	case UBLK_CMD_QUIESCE_DEV:
3563 		mask = MAY_READ | MAY_WRITE;
3564 		break;
3565 	default:
3566 		goto exit;
3567 	}
3568 
3569 	ret = ublk_char_dev_permission(ub, dev_path, mask);
3570 	if (!ret) {
3571 		header->len -= header->dev_path_len;
3572 		header->addr += header->dev_path_len;
3573 	}
3574 	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
3575 			__func__, ub->ub_number, cmd->cmd_op,
3576 			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
3577 			dev_path, ret);
3578 exit:
3579 	kfree(dev_path);
3580 	return ret;
3581 }
3582 
3583 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
3584 		unsigned int issue_flags)
3585 {
3586 	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
3587 	struct ublk_device *ub = NULL;
3588 	u32 cmd_op = cmd->cmd_op;
3589 	int ret = -EINVAL;
3590 
3591 	if (issue_flags & IO_URING_F_NONBLOCK)
3592 		return -EAGAIN;
3593 
3594 	ublk_ctrl_cmd_dump(cmd);
3595 
3596 	if (!(issue_flags & IO_URING_F_SQE128))
3597 		goto out;
3598 
3599 	ret = ublk_check_cmd_op(cmd_op);
3600 	if (ret)
3601 		goto out;
3602 
3603 	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
3604 		ret = ublk_ctrl_get_features(header);
3605 		goto out;
3606 	}
3607 
3608 	if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
3609 		ret = -ENODEV;
3610 		ub = ublk_get_device_from_id(header->dev_id);
3611 		if (!ub)
3612 			goto out;
3613 
3614 		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
3615 		if (ret)
3616 			goto put_dev;
3617 	}
3618 
3619 	switch (_IOC_NR(cmd_op)) {
3620 	case UBLK_CMD_START_DEV:
3621 		ret = ublk_ctrl_start_dev(ub, header);
3622 		break;
3623 	case UBLK_CMD_STOP_DEV:
3624 		ret = ublk_ctrl_stop_dev(ub);
3625 		break;
3626 	case UBLK_CMD_GET_DEV_INFO:
3627 	case UBLK_CMD_GET_DEV_INFO2:
3628 		ret = ublk_ctrl_get_dev_info(ub, header);
3629 		break;
3630 	case UBLK_CMD_ADD_DEV:
3631 		ret = ublk_ctrl_add_dev(header);
3632 		break;
3633 	case UBLK_CMD_DEL_DEV:
3634 		ret = ublk_ctrl_del_dev(&ub, true);
3635 		break;
3636 	case UBLK_CMD_DEL_DEV_ASYNC:
3637 		ret = ublk_ctrl_del_dev(&ub, false);
3638 		break;
3639 	case UBLK_CMD_GET_QUEUE_AFFINITY:
3640 		ret = ublk_ctrl_get_queue_affinity(ub, header);
3641 		break;
3642 	case UBLK_CMD_GET_PARAMS:
3643 		ret = ublk_ctrl_get_params(ub, header);
3644 		break;
3645 	case UBLK_CMD_SET_PARAMS:
3646 		ret = ublk_ctrl_set_params(ub, header);
3647 		break;
3648 	case UBLK_CMD_START_USER_RECOVERY:
3649 		ret = ublk_ctrl_start_recovery(ub, header);
3650 		break;
3651 	case UBLK_CMD_END_USER_RECOVERY:
3652 		ret = ublk_ctrl_end_recovery(ub, header);
3653 		break;
3654 	case UBLK_CMD_UPDATE_SIZE:
3655 		ublk_ctrl_set_size(ub, header);
3656 		ret = 0;
3657 		break;
3658 	case UBLK_CMD_QUIESCE_DEV:
3659 		ret = ublk_ctrl_quiesce_dev(ub, header);
3660 		break;
3661 	default:
3662 		ret = -EOPNOTSUPP;
3663 		break;
3664 	}
3665 
3666  put_dev:
3667 	if (ub)
3668 		ublk_put_device(ub);
3669  out:
3670 	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
3671 			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
3672 	return ret;
3673 }
3674 
3675 static const struct file_operations ublk_ctl_fops = {
3676 	.open		= nonseekable_open,
3677 	.uring_cmd      = ublk_ctrl_uring_cmd,
3678 	.owner		= THIS_MODULE,
3679 	.llseek		= noop_llseek,
3680 };
3681 
3682 static struct miscdevice ublk_misc = {
3683 	.minor		= MISC_DYNAMIC_MINOR,
3684 	.name		= "ublk-control",
3685 	.fops		= &ublk_ctl_fops,
3686 };
3687 
3688 static int __init ublk_init(void)
3689 {
3690 	int ret;
3691 
3692 	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
3693 			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
3694 	BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
3695 
3696 	init_waitqueue_head(&ublk_idr_wq);
3697 
3698 	ret = misc_register(&ublk_misc);
3699 	if (ret)
3700 		return ret;
3701 
3702 	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
3703 	if (ret)
3704 		goto unregister_mis;
3705 
3706 	ret = class_register(&ublk_chr_class);
3707 	if (ret)
3708 		goto free_chrdev_region;
3709 
3710 	return 0;
3711 
3712 free_chrdev_region:
3713 	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3714 unregister_mis:
3715 	misc_deregister(&ublk_misc);
3716 	return ret;
3717 }
3718 
3719 static void __exit ublk_exit(void)
3720 {
3721 	struct ublk_device *ub;
3722 	int id;
3723 
3724 	idr_for_each_entry(&ublk_index_idr, ub, id)
3725 		ublk_remove(ub);
3726 
3727 	class_unregister(&ublk_chr_class);
3728 	misc_deregister(&ublk_misc);
3729 
3730 	idr_destroy(&ublk_index_idr);
3731 	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
3732 }
3733 
3734 module_init(ublk_init);
3735 module_exit(ublk_exit);
3736 
3737 static int ublk_set_max_unprivileged_ublks(const char *buf,
3738 					   const struct kernel_param *kp)
3739 {
3740 	return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
3741 }
3742 
3743 static int ublk_get_max_unprivileged_ublks(char *buf,
3744 					   const struct kernel_param *kp)
3745 {
3746 	return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
3747 }
3748 
3749 static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
3750 	.set = ublk_set_max_unprivileged_ublks,
3751 	.get = ublk_get_max_unprivileged_ublks,
3752 };
3753 
3754 module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
3755 		&unprivileged_ublks_max, 0644);
3756 MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
3757 
3758 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
3759 MODULE_DESCRIPTION("Userspace block device");
3760 MODULE_LICENSE("GPL");
3761