xref: /linux/drivers/nvme/host/ioctl.c (revision 60684c2bd35064043360e6f716d1b7c20e967b7d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2011-2014, Intel Corporation.
4  * Copyright (c) 2017-2021 Christoph Hellwig.
5  */
6 #include <linux/ptrace.h>	/* for force_successful_syscall_return */
7 #include <linux/nvme_ioctl.h>
8 #include <linux/io_uring.h>
9 #include "nvme.h"
10 
11 enum {
12 	NVME_IOCTL_VEC		= (1 << 0),
13 	NVME_IOCTL_PARTITION	= (1 << 1),
14 };
15 
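/*
 * Decide whether an unprivileged caller may pass this command through to the
 * device.  CAP_SYS_ADMIN bypasses all checks; everything else is restricted
 * to commands whose effects are known to be safe for the given namespace and
 * open mode.
 */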
16 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
17 		unsigned int flags, fmode_t mode)
18 {
19 	u32 effects;
20 
21 	if (capable(CAP_SYS_ADMIN))
22 		return true;
23 
24 	/*
25 	 * Do not allow unprivileged passthrough on partitions, as that allows an
26 	 * escape from the containment of the partition.
27 	 */
28 	if (flags & NVME_IOCTL_PARTITION)
29 		return false;
30 
31 	/*
32 	 * Do not allow unprivileged processes to send vendor-specific or fabrics
33 	 * commands, as we can't be sure about their effects.
34 	 */
35 	if (c->common.opcode >= nvme_cmd_vendor_start ||
36 	    c->common.opcode == nvme_fabrics_command)
37 		return false;
38 
39 	/*
40 	 * Do not allow unprivileged passthrough of admin commands except
41 	 * for a subset of identify commands that contain information required
42 	 * to form proper I/O commands in userspace and do not expose any
43 	 * potentially sensitive information.
44 	 */
45 	if (!ns) {
46 		if (c->common.opcode == nvme_admin_identify) {
47 			switch (c->identify.cns) {
48 			case NVME_ID_CNS_NS:
49 			case NVME_ID_CNS_CS_NS:
50 			case NVME_ID_CNS_NS_CS_INDEP:
51 			case NVME_ID_CNS_CS_CTRL:
52 			case NVME_ID_CNS_CTRL:
53 				return true;
54 			}
55 		}
56 		return false;
57 	}
58 
59 	/*
60 	 * Check if the controller provides a Commands Supported and Effects log
61 	 * and marks this command as supported.  If not, reject unprivileged
62 	 * passthrough.
63 	 */
64 	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
65 	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
66 		return false;
67 
68 	/*
69 	 * Don't allow passthrough for commands that have intrusive (or unknown)
70 	 * effects.
71 	 */
72 	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
73 			NVME_CMD_EFFECTS_UUID_SEL |
74 			NVME_CMD_EFFECTS_SCOPE_MASK))
75 		return false;
76 
77 	/*
78 	 * Only allow I/O commands that transfer data to the controller or that
79 	 * change the logical block contents if the file descriptor is open for
80 	 * writing.
81 	 */
82 	if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
83 		return mode & FMODE_WRITE;
84 	return true;
85 }
86 
87 /*
88  * Convert integer values from ioctl structures to user pointers, silently
89  * ignoring the upper bits in the compat case to match behaviour of 32-bit
90  * kernels.
91  */
92 static void __user *nvme_to_user_ptr(uintptr_t ptrval)
93 {
94 	if (in_compat_syscall())
95 		ptrval = (compat_uptr_t)ptrval;
96 	return (void __user *)ptrval;
97 }
98 
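/*
 * Allocate a kernel bounce buffer for the user metadata, copy it in for
 * writes to the device and attach it to the request as a bio integrity
 * payload.
 */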
99 static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
100 		unsigned len, u32 seed)
101 {
102 	struct bio_integrity_payload *bip;
103 	int ret = -ENOMEM;
104 	void *buf;
105 	struct bio *bio = req->bio;
106 
107 	buf = kmalloc(len, GFP_KERNEL);
108 	if (!buf)
109 		goto out;
110 
111 	ret = -EFAULT;
112 	if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
113 		goto out_free_meta;
114 
115 	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
116 	if (IS_ERR(bip)) {
117 		ret = PTR_ERR(bip);
118 		goto out_free_meta;
119 	}
120 
121 	bip->bip_iter.bi_size = len;
122 	bip->bip_iter.bi_sector = seed;
123 	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
124 			offset_in_page(buf));
125 	if (ret != len) {
126 		ret = -ENOMEM;
127 		goto out_free_meta;
128 	}
129 
130 	req->cmd_flags |= REQ_INTEGRITY;
131 	return buf;
132 out_free_meta:
133 	kfree(buf);
134 out:
135 	return ERR_PTR(ret);
136 }
137 
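/*
 * Copy the metadata bounce buffer back to user space for reads and free it.
 */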
138 static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
139 		void *meta, unsigned len, int ret)
140 {
141 	if (!ret && req_op(req) == REQ_OP_DRV_IN &&
142 	    copy_to_user(ubuf, meta, len))
143 		ret = -EFAULT;
144 	kfree(meta);
145 	return ret;
146 }
147 
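/*
 * Allocate a passthrough request for @cmd and flag it as a user-submitted
 * command (NVME_REQ_USERCMD) before returning it to the caller.
 */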
148 static struct request *nvme_alloc_user_request(struct request_queue *q,
149 		struct nvme_command *cmd, blk_opf_t rq_flags,
150 		blk_mq_req_flags_t blk_flags)
151 {
152 	struct request *req;
153 
154 	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
155 	if (IS_ERR(req))
156 		return req;
157 	nvme_init_request(req, cmd);
158 	nvme_req(req)->flags |= NVME_REQ_USERCMD;
159 	return req;
160 }
161 
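/*
 * Map the user data buffer (either a registered io_uring fixed buffer or a
 * plain user pointer, optionally vectored) onto the request, and attach the
 * metadata bounce buffer if one was supplied.  The request is freed on
 * failure, so callers must not touch it after an error return.
 */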
162 static int nvme_map_user_request(struct request *req, u64 ubuffer,
163 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
164 		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
165 		unsigned int flags)
166 {
167 	struct request_queue *q = req->q;
168 	struct nvme_ns *ns = q->queuedata;
169 	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
170 	struct bio *bio = NULL;
171 	void *meta = NULL;
172 	int ret;
173 
174 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
175 		struct iov_iter iter;
176 
177 		/* fixedbufs is only for non-vectored io */
178 		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
179 			return -EINVAL;
180 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
181 				rq_data_dir(req), &iter, ioucmd);
182 		if (ret < 0)
183 			goto out;
184 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
185 	} else {
186 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
187 				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
188 				0, rq_data_dir(req));
189 	}
190 
191 	if (ret)
192 		goto out;
193 	bio = req->bio;
194 	if (bdev)
195 		bio_set_dev(bio, bdev);
196 
197 	if (bdev && meta_buffer && meta_len) {
198 		meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
199 				meta_seed);
200 		if (IS_ERR(meta)) {
201 			ret = PTR_ERR(meta);
202 			goto out_unmap;
203 		}
204 		*metap = meta;
205 	}
206 
207 	return ret;
208 
209 out_unmap:
210 	if (bio)
211 		blk_rq_unmap_user(bio);
212 out:
213 	blk_mq_free_request(req);
214 	return ret;
215 }
216 
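/*
 * Synchronous passthrough path shared by the ioctl handlers: map the user
 * buffers, execute the command, copy back metadata and the command result,
 * and account for any command effects.
 */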
217 static int nvme_submit_user_cmd(struct request_queue *q,
218 		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
219 		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
220 		u64 *result, unsigned timeout, unsigned int flags)
221 {
222 	struct nvme_ns *ns = q->queuedata;
223 	struct nvme_ctrl *ctrl;
224 	struct request *req;
225 	void *meta = NULL;
226 	struct bio *bio;
227 	u32 effects;
228 	int ret;
229 
230 	req = nvme_alloc_user_request(q, cmd, 0, 0);
231 	if (IS_ERR(req))
232 		return PTR_ERR(req);
233 
234 	req->timeout = timeout;
235 	if (ubuffer && bufflen) {
236 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
237 				meta_len, meta_seed, &meta, NULL, flags);
238 		if (ret)
239 			return ret;
240 	}
241 
242 	bio = req->bio;
243 	ctrl = nvme_req(req)->ctrl;
244 
245 	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
246 	ret = nvme_execute_rq(req, false);
247 	if (result)
248 		*result = le64_to_cpu(nvme_req(req)->result.u64);
249 	if (meta)
250 		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
251 						meta_len, ret);
252 	if (bio)
253 		blk_rq_unmap_user(bio);
254 	blk_mq_free_request(req);
255 
256 	if (effects)
257 		nvme_passthru_end(ctrl, effects, cmd, ret);
258 
259 	return ret;
260 }
261 
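/*
 * Handle NVME_IOCTL_SUBMIT_IO: translate the legacy struct nvme_user_io into
 * a read/write/compare command and submit it synchronously.
 */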
262 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
263 {
264 	struct nvme_user_io io;
265 	struct nvme_command c;
266 	unsigned length, meta_len;
267 	void __user *metadata;
268 
269 	if (copy_from_user(&io, uio, sizeof(io)))
270 		return -EFAULT;
271 	if (io.flags)
272 		return -EINVAL;
273 
274 	switch (io.opcode) {
275 	case nvme_cmd_write:
276 	case nvme_cmd_read:
277 	case nvme_cmd_compare:
278 		break;
279 	default:
280 		return -EINVAL;
281 	}
282 
283 	length = (io.nblocks + 1) << ns->lba_shift;
284 
285 	if ((io.control & NVME_RW_PRINFO_PRACT) &&
286 	    ns->ms == sizeof(struct t10_pi_tuple)) {
287 		/*
288 		 * Protection information is stripped/inserted by the
289 		 * controller.
290 		 */
291 		if (nvme_to_user_ptr(io.metadata))
292 			return -EINVAL;
293 		meta_len = 0;
294 		metadata = NULL;
295 	} else {
296 		meta_len = (io.nblocks + 1) * ns->ms;
297 		metadata = nvme_to_user_ptr(io.metadata);
298 	}
299 
300 	if (ns->features & NVME_NS_EXT_LBAS) {
301 		length += meta_len;
302 		meta_len = 0;
303 	} else if (meta_len) {
304 		if ((io.metadata & 3) || !io.metadata)
305 			return -EINVAL;
306 	}
307 
308 	memset(&c, 0, sizeof(c));
309 	c.rw.opcode = io.opcode;
310 	c.rw.flags = io.flags;
311 	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
312 	c.rw.slba = cpu_to_le64(io.slba);
313 	c.rw.length = cpu_to_le16(io.nblocks);
314 	c.rw.control = cpu_to_le16(io.control);
315 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
316 	c.rw.reftag = cpu_to_le32(io.reftag);
317 	c.rw.apptag = cpu_to_le16(io.apptag);
318 	c.rw.appmask = cpu_to_le16(io.appmask);
319 
320 	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
321 			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
322 }
323 
324 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
325 					struct nvme_ns *ns, __u32 nsid)
326 {
327 	if (ns && nsid != ns->head->ns_id) {
328 		dev_err(ctrl->device,
329 			"%s: nsid (%u) in cmd does not match nsid (%u)"
330 			" of namespace\n",
331 			current->comm, nsid, ns->head->ns_id);
332 		return false;
333 	}
334 
335 	return true;
336 }
337 
338 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
339 		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
340 		fmode_t mode)
341 {
342 	struct nvme_passthru_cmd cmd;
343 	struct nvme_command c;
344 	unsigned timeout = 0;
345 	u64 result;
346 	int status;
347 
348 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
349 		return -EFAULT;
350 	if (cmd.flags)
351 		return -EINVAL;
352 	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
353 		return -EINVAL;
354 
355 	memset(&c, 0, sizeof(c));
356 	c.common.opcode = cmd.opcode;
357 	c.common.flags = cmd.flags;
358 	c.common.nsid = cpu_to_le32(cmd.nsid);
359 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
360 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
361 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
362 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
363 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
364 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
365 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
366 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
367 
368 	if (!nvme_cmd_allowed(ns, &c, 0, mode))
369 		return -EACCES;
370 
371 	if (cmd.timeout_ms)
372 		timeout = msecs_to_jiffies(cmd.timeout_ms);
373 
374 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
375 			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
376 			cmd.metadata_len, 0, &result, timeout, 0);
377 
378 	if (status >= 0) {
379 		if (put_user(result, &ucmd->result))
380 			return -EFAULT;
381 	}
382 
383 	return status;
384 }
385 
386 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
387 		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
388 		fmode_t mode)
389 {
390 	struct nvme_passthru_cmd64 cmd;
391 	struct nvme_command c;
392 	unsigned timeout = 0;
393 	int status;
394 
395 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
396 		return -EFAULT;
397 	if (cmd.flags)
398 		return -EINVAL;
399 	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
400 		return -EINVAL;
401 
402 	memset(&c, 0, sizeof(c));
403 	c.common.opcode = cmd.opcode;
404 	c.common.flags = cmd.flags;
405 	c.common.nsid = cpu_to_le32(cmd.nsid);
406 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
407 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
408 	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
409 	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
410 	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
411 	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
412 	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
413 	c.common.cdw15 = cpu_to_le32(cmd.cdw15);
414 
415 	if (!nvme_cmd_allowed(ns, &c, flags, mode))
416 		return -EACCES;
417 
418 	if (cmd.timeout_ms)
419 		timeout = msecs_to_jiffies(cmd.timeout_ms);
420 
421 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
422 			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
423 			cmd.metadata_len, 0, &cmd.result, timeout, flags);
424 
425 	if (status >= 0) {
426 		if (put_user(cmd.result, &ucmd->result))
427 			return -EFAULT;
428 	}
429 
430 	return status;
431 }
432 
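/* Buffer and timeout fields read from the io_uring SQE command payload. */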
433 struct nvme_uring_data {
434 	__u64	metadata;
435 	__u64	addr;
436 	__u32	data_len;
437 	__u32	metadata_len;
438 	__u32	timeout_ms;
439 };
440 
441 /*
442  * This overlays struct io_uring_cmd pdu.
443  * Expect build errors if this grows larger than that.
444  */
445 struct nvme_uring_cmd_pdu {
446 	union {
447 		struct bio *bio;
448 		struct request *req;
449 	};
450 	u32 meta_len;
451 	u32 nvme_status;
452 	union {
453 		struct {
454 			void *meta; /* kernel-resident buffer */
455 			void __user *meta_buffer;
456 		};
457 		u64 result;
458 	} u;
459 };
460 
461 static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
462 		struct io_uring_cmd *ioucmd)
463 {
464 	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
465 }
466 
467 static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
468 {
469 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
470 	struct request *req = pdu->req;
471 	int status;
472 	u64 result;
473 
474 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
475 		status = -EINTR;
476 	else
477 		status = nvme_req(req)->status;
478 
479 	result = le64_to_cpu(nvme_req(req)->result.u64);
480 
481 	if (pdu->meta_len)
482 		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
483 					pdu->u.meta, pdu->meta_len, status);
484 	if (req->bio)
485 		blk_rq_unmap_user(req->bio);
486 	blk_mq_free_request(req);
487 
488 	io_uring_cmd_done(ioucmd, status, result);
489 }
490 
491 static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
492 {
493 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
494 
495 	if (pdu->bio)
496 		blk_rq_unmap_user(pdu->bio);
497 
498 	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
499 }
500 
501 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
502 						blk_status_t err)
503 {
504 	struct io_uring_cmd *ioucmd = req->end_io_data;
505 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
506 	void *cookie = READ_ONCE(ioucmd->cookie);
507 
508 	req->bio = pdu->bio;
509 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
510 		pdu->nvme_status = -EINTR;
511 	else
512 		pdu->nvme_status = nvme_req(req)->status;
513 	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
514 
515 	/*
516 	 * For iopoll, complete it directly.
517 	 * Otherwise, move the completion to task work.
518 	 */
519 	if (cookie != NULL && blk_rq_is_poll(req))
520 		nvme_uring_task_cb(ioucmd);
521 	else
522 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
523 
524 	return RQ_END_IO_FREE;
525 }
526 
527 static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
528 						     blk_status_t err)
529 {
530 	struct io_uring_cmd *ioucmd = req->end_io_data;
531 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
532 	void *cookie = READ_ONCE(ioucmd->cookie);
533 
534 	req->bio = pdu->bio;
535 	pdu->req = req;
536 
537 	/*
538 	 * For iopoll, complete it directly.
539 	 * Otherwise, move the completion to task work.
540 	 */
541 	if (cookie != NULL && blk_rq_is_poll(req))
542 		nvme_uring_task_meta_cb(ioucmd);
543 	else
544 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
545 
546 	return RQ_END_IO_NONE;
547 }
548 
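/*
 * Asynchronous passthrough via io_uring: build the command from the SQE,
 * map the user buffers and queue the request.  Completion is reported from
 * the end_io handlers, either inline for polled I/O or via task work.
 */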
549 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
550 		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
551 {
552 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
553 	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
554 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
555 	struct nvme_uring_data d;
556 	struct nvme_command c;
557 	struct request *req;
558 	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
559 	blk_mq_req_flags_t blk_flags = 0;
560 	void *meta = NULL;
561 	int ret;
562 
563 	c.common.opcode = READ_ONCE(cmd->opcode);
564 	c.common.flags = READ_ONCE(cmd->flags);
565 	if (c.common.flags)
566 		return -EINVAL;
567 
568 	c.common.command_id = 0;
569 	c.common.nsid = cpu_to_le32(cmd->nsid);
570 	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
571 		return -EINVAL;
572 
573 	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
574 	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
575 	c.common.metadata = 0;
576 	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
577 	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
578 	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
579 	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
580 	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
581 	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
582 	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
583 
584 	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
585 		return -EACCES;
586 
587 	d.metadata = READ_ONCE(cmd->metadata);
588 	d.addr = READ_ONCE(cmd->addr);
589 	d.data_len = READ_ONCE(cmd->data_len);
590 	d.metadata_len = READ_ONCE(cmd->metadata_len);
591 	d.timeout_ms = READ_ONCE(cmd->timeout_ms);
592 
593 	if (issue_flags & IO_URING_F_NONBLOCK) {
594 		rq_flags |= REQ_NOWAIT;
595 		blk_flags = BLK_MQ_REQ_NOWAIT;
596 	}
597 	if (issue_flags & IO_URING_F_IOPOLL)
598 		rq_flags |= REQ_POLLED;
599 
600 retry:
601 	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
602 	if (IS_ERR(req))
603 		return PTR_ERR(req);
604 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
605 
606 	if (d.addr && d.data_len) {
607 		ret = nvme_map_user_request(req, d.addr,
608 			d.data_len, nvme_to_user_ptr(d.metadata),
609 			d.metadata_len, 0, &meta, ioucmd, vec);
610 		if (ret)
611 			return ret;
612 	}
613 
614 	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
615 		if (unlikely(!req->bio)) {
616 			/* we can't poll this, so alloc regular req instead */
617 			blk_mq_free_request(req);
618 			rq_flags &= ~REQ_POLLED;
619 			goto retry;
620 		} else {
621 			WRITE_ONCE(ioucmd->cookie, req->bio);
622 			req->bio->bi_opf |= REQ_POLLED;
623 		}
624 	}
625 	/* save the bio so it can be freed on completion; req->bio is NULL by then */
626 	pdu->bio = req->bio;
627 	pdu->meta_len = d.metadata_len;
628 	req->end_io_data = ioucmd;
629 	if (pdu->meta_len) {
630 		pdu->u.meta = meta;
631 		pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
632 		req->end_io = nvme_uring_cmd_end_io_meta;
633 	} else {
634 		req->end_io = nvme_uring_cmd_end_io;
635 	}
636 	blk_execute_rq_nowait(req, false);
637 	return -EIOCBQUEUED;
638 }
639 
640 static bool is_ctrl_ioctl(unsigned int cmd)
641 {
642 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
643 		return true;
644 	if (is_sed_ioctl(cmd))
645 		return true;
646 	return false;
647 }
648 
649 static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
650 		void __user *argp, fmode_t mode)
651 {
652 	switch (cmd) {
653 	case NVME_IOCTL_ADMIN_CMD:
654 		return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
655 	case NVME_IOCTL_ADMIN64_CMD:
656 		return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
657 	default:
658 		return sed_ioctl(ctrl->opal_dev, cmd, argp);
659 	}
660 }
661 
662 #ifdef COMPAT_FOR_U64_ALIGNMENT
663 struct nvme_user_io32 {
664 	__u8	opcode;
665 	__u8	flags;
666 	__u16	control;
667 	__u16	nblocks;
668 	__u16	rsvd;
669 	__u64	metadata;
670 	__u64	addr;
671 	__u64	slba;
672 	__u32	dsmgmt;
673 	__u32	reftag;
674 	__u16	apptag;
675 	__u16	appmask;
676 } __attribute__((__packed__));
677 #define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
678 #endif /* COMPAT_FOR_U64_ALIGNMENT */
679 
680 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
681 		void __user *argp, unsigned int flags, fmode_t mode)
682 {
683 	switch (cmd) {
684 	case NVME_IOCTL_ID:
685 		force_successful_syscall_return();
686 		return ns->head->ns_id;
687 	case NVME_IOCTL_IO_CMD:
688 		return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
689 	/*
690 	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
691 	 * Just accept the compat version as all fields that are used are the
692 	 * same size and at the same offset.
693 	 */
694 #ifdef COMPAT_FOR_U64_ALIGNMENT
695 	case NVME_IOCTL_SUBMIT_IO32:
696 #endif
697 	case NVME_IOCTL_SUBMIT_IO:
698 		return nvme_submit_io(ns, argp);
699 	case NVME_IOCTL_IO64_CMD_VEC:
700 		flags |= NVME_IOCTL_VEC;
701 		fallthrough;
702 	case NVME_IOCTL_IO64_CMD:
703 		return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
704 	default:
705 		return -ENOTTY;
706 	}
707 }
708 
709 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
710 		unsigned int cmd, unsigned long arg)
711 {
712 	struct nvme_ns *ns = bdev->bd_disk->private_data;
713 	void __user *argp = (void __user *)arg;
714 	unsigned int flags = 0;
715 
716 	if (bdev_is_partition(bdev))
717 		flags |= NVME_IOCTL_PARTITION;
718 
719 	if (is_ctrl_ioctl(cmd))
720 		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
721 	return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
722 }
723 
724 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
725 {
726 	struct nvme_ns *ns =
727 		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
728 	void __user *argp = (void __user *)arg;
729 
730 	if (is_ctrl_ioctl(cmd))
731 		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
732 	return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
733 }
734 
735 static int nvme_uring_cmd_checks(unsigned int issue_flags)
736 {
737 
738 	/* NVMe passthrough requires big SQE/CQE support */
739 	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
740 	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
741 		return -EOPNOTSUPP;
742 	return 0;
743 }
744 
745 static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
746 			     unsigned int issue_flags)
747 {
748 	struct nvme_ctrl *ctrl = ns->ctrl;
749 	int ret;
750 
751 	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
752 
753 	ret = nvme_uring_cmd_checks(issue_flags);
754 	if (ret)
755 		return ret;
756 
757 	switch (ioucmd->cmd_op) {
758 	case NVME_URING_CMD_IO:
759 		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
760 		break;
761 	case NVME_URING_CMD_IO_VEC:
762 		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
763 		break;
764 	default:
765 		ret = -ENOTTY;
766 	}
767 
768 	return ret;
769 }
770 
771 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
772 {
773 	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
774 			struct nvme_ns, cdev);
775 
776 	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
777 }
778 
779 int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
780 				 struct io_comp_batch *iob,
781 				 unsigned int poll_flags)
782 {
783 	struct bio *bio;
784 	int ret = 0;
785 	struct nvme_ns *ns;
786 	struct request_queue *q;
787 
788 	rcu_read_lock();
789 	bio = READ_ONCE(ioucmd->cookie);
790 	ns = container_of(file_inode(ioucmd->file)->i_cdev,
791 			struct nvme_ns, cdev);
792 	q = ns->queue;
793 	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
794 		ret = bio_poll(bio, iob, poll_flags);
795 	rcu_read_unlock();
796 	return ret;
797 }
798 #ifdef CONFIG_NVME_MULTIPATH
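/*
 * Controller ioctls on the ns_head node: take a controller reference and
 * drop the SRCU lock before issuing the command, so that deleting namespaces
 * through the passthrough interface cannot deadlock against us.
 */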
799 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
800 		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
801 		fmode_t mode)
802 	__releases(&head->srcu)
803 {
804 	struct nvme_ctrl *ctrl = ns->ctrl;
805 	int ret;
806 
807 	nvme_get_ctrl(ns->ctrl);
808 	srcu_read_unlock(&head->srcu, srcu_idx);
809 	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
810 
811 	nvme_put_ctrl(ctrl);
812 	return ret;
813 }
814 
815 int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
816 		unsigned int cmd, unsigned long arg)
817 {
818 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
819 	void __user *argp = (void __user *)arg;
820 	struct nvme_ns *ns;
821 	int srcu_idx, ret = -EWOULDBLOCK;
822 	unsigned int flags = 0;
823 
824 	if (bdev_is_partition(bdev))
825 		flags |= NVME_IOCTL_PARTITION;
826 
827 	srcu_idx = srcu_read_lock(&head->srcu);
828 	ns = nvme_find_path(head);
829 	if (!ns)
830 		goto out_unlock;
831 
832 	/*
833 	 * Handle ioctls that apply to the controller instead of the namespace
834 	 * separately and drop the ns SRCU reference early.  This avoids a
835 	 * deadlock when deleting namespaces using the passthrough interface.
836 	 */
837 	if (is_ctrl_ioctl(cmd))
838 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
839 					mode);
840 
841 	ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
842 out_unlock:
843 	srcu_read_unlock(&head->srcu, srcu_idx);
844 	return ret;
845 }
846 
847 long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
848 		unsigned long arg)
849 {
850 	struct cdev *cdev = file_inode(file)->i_cdev;
851 	struct nvme_ns_head *head =
852 		container_of(cdev, struct nvme_ns_head, cdev);
853 	void __user *argp = (void __user *)arg;
854 	struct nvme_ns *ns;
855 	int srcu_idx, ret = -EWOULDBLOCK;
856 
857 	srcu_idx = srcu_read_lock(&head->srcu);
858 	ns = nvme_find_path(head);
859 	if (!ns)
860 		goto out_unlock;
861 
862 	if (is_ctrl_ioctl(cmd))
863 		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
864 				file->f_mode);
865 
866 	ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
867 out_unlock:
868 	srcu_read_unlock(&head->srcu, srcu_idx);
869 	return ret;
870 }
871 
872 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
873 		unsigned int issue_flags)
874 {
875 	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
876 	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
877 	int srcu_idx = srcu_read_lock(&head->srcu);
878 	struct nvme_ns *ns = nvme_find_path(head);
879 	int ret = -EINVAL;
880 
881 	if (ns)
882 		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
883 	srcu_read_unlock(&head->srcu, srcu_idx);
884 	return ret;
885 }
886 
887 int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
888 				      struct io_comp_batch *iob,
889 				      unsigned int poll_flags)
890 {
891 	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
892 	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
893 	int srcu_idx = srcu_read_lock(&head->srcu);
894 	struct nvme_ns *ns = nvme_find_path(head);
895 	struct bio *bio;
896 	int ret = 0;
897 	struct request_queue *q;
898 
899 	if (ns) {
900 		rcu_read_lock();
901 		bio = READ_ONCE(ioucmd->cookie);
902 		q = ns->queue;
903 		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
904 				&& bio->bi_bdev)
905 			ret = bio_poll(bio, iob, poll_flags);
906 		rcu_read_unlock();
907 	}
908 	srcu_read_unlock(&head->srcu, srcu_idx);
909 	return ret;
910 }
911 #endif /* CONFIG_NVME_MULTIPATH */
912 
913 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
914 {
915 	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
916 	int ret;
917 
918 	/* IOPOLL not supported yet */
919 	if (issue_flags & IO_URING_F_IOPOLL)
920 		return -EOPNOTSUPP;
921 
922 	ret = nvme_uring_cmd_checks(issue_flags);
923 	if (ret)
924 		return ret;
925 
926 	switch (ioucmd->cmd_op) {
927 	case NVME_URING_CMD_ADMIN:
928 		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
929 		break;
930 	case NVME_URING_CMD_ADMIN_VEC:
931 		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
932 		break;
933 	default:
934 		ret = -ENOTTY;
935 	}
936 
937 	return ret;
938 }
939 
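/*
 * Deprecated: NVME_IOCTL_IO_CMD on the controller character device is only
 * honoured when the controller has exactly one namespace, which is then used
 * as the target of the command.
 */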
940 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
941 		fmode_t mode)
942 {
943 	struct nvme_ns *ns;
944 	int ret;
945 
946 	down_read(&ctrl->namespaces_rwsem);
947 	if (list_empty(&ctrl->namespaces)) {
948 		ret = -ENOTTY;
949 		goto out_unlock;
950 	}
951 
952 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
953 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
954 		dev_warn(ctrl->device,
955 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
956 		ret = -EINVAL;
957 		goto out_unlock;
958 	}
959 
960 	dev_warn(ctrl->device,
961 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
962 	kref_get(&ns->kref);
963 	up_read(&ctrl->namespaces_rwsem);
964 
965 	ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
966 	nvme_put_ns(ns);
967 	return ret;
968 
969 out_unlock:
970 	up_read(&ctrl->namespaces_rwsem);
971 	return ret;
972 }
973 
974 long nvme_dev_ioctl(struct file *file, unsigned int cmd,
975 		unsigned long arg)
976 {
977 	struct nvme_ctrl *ctrl = file->private_data;
978 	void __user *argp = (void __user *)arg;
979 
980 	switch (cmd) {
981 	case NVME_IOCTL_ADMIN_CMD:
982 		return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
983 	case NVME_IOCTL_ADMIN64_CMD:
984 		return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
985 	case NVME_IOCTL_IO_CMD:
986 		return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
987 	case NVME_IOCTL_RESET:
988 		if (!capable(CAP_SYS_ADMIN))
989 			return -EACCES;
990 		dev_warn(ctrl->device, "resetting controller\n");
991 		return nvme_reset_ctrl_sync(ctrl);
992 	case NVME_IOCTL_SUBSYS_RESET:
993 		if (!capable(CAP_SYS_ADMIN))
994 			return -EACCES;
995 		return nvme_reset_subsystem(ctrl);
996 	case NVME_IOCTL_RESCAN:
997 		if (!capable(CAP_SYS_ADMIN))
998 			return -EACCES;
999 		nvme_queue_scan(ctrl);
1000 		return 0;
1001 	default:
1002 		return -ENOTTY;
1003 	}
1004 }
1005