xref: /linux/drivers/nvme/host/ioctl.c (revision 949dd321ded41cba661f4ec04c521e294e73b89f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/blk-integrity.h>
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
#include "nvme.h"

enum {
	NVME_IOCTL_VEC		= (1 << 0),
	NVME_IOCTL_PARTITION	= (1 << 1),
};

static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
		unsigned int flags, bool open_for_write)
{
	u32 effects;

	/*
	 * Do not allow unprivileged passthrough on partitions, as that allows an
	 * escape from the containment of the partition.
	 */
	if (flags & NVME_IOCTL_PARTITION)
		goto admin;

	/*
	 * Do not allow unprivileged processes to send vendor specific or fabrics
	 * commands as we can't be sure about their effects.
	 */
	if (c->common.opcode >= nvme_cmd_vendor_start ||
	    c->common.opcode == nvme_fabrics_command)
		goto admin;

	/*
	 * Do not allow unprivileged passthrough of admin commands except
	 * for a subset of identify commands that contain information required
	 * to form proper I/O commands in userspace and do not expose any
	 * potentially sensitive information.
	 */
	if (!ns) {
		if (c->common.opcode == nvme_admin_identify) {
			switch (c->identify.cns) {
			case NVME_ID_CNS_NS:
			case NVME_ID_CNS_CS_NS:
			case NVME_ID_CNS_NS_CS_INDEP:
			case NVME_ID_CNS_CS_CTRL:
			case NVME_ID_CNS_CTRL:
				return true;
			}
		}
		goto admin;
	}

	/*
	 * Check if the controller provides a Commands Supported and Effects log
	 * and marks this command as supported.  If not, reject unprivileged
	 * passthrough.
	 */
	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		goto admin;

	/*
	 * Don't allow passthrough for commands that have intrusive (or unknown)
	 * effects.
	 */
	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		goto admin;

	/*
	 * Only allow I/O commands that transfer data to the controller or that
	 * change the logical block contents if the file descriptor is open for
	 * writing.
	 */
	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
	    !open_for_write)
		goto admin;

	return true;
admin:
	return capable(CAP_SYS_ADMIN);
}
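
/*
 * Illustration only, not driver code: the identify whitelist above is what
 * lets an unprivileged process that merely has read access to the device
 * node fetch e.g. Identify Controller data through the passthrough
 * interface.  A minimal userspace sketch, assuming /dev/nvme0 exists and
 * the uapi headers are installed:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/nvme_ioctl.h>
 *
 *	static char id[4096];			// Identify data buffer
 *
 *	int main(void)
 *	{
 *		struct nvme_admin_cmd cmd;
 *		int fd = open("/dev/nvme0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&cmd, 0, sizeof(cmd));
 *		cmd.opcode = 0x06;		// Identify
 *		cmd.addr = (unsigned long)id;
 *		cmd.data_len = sizeof(id);
 *		cmd.cdw10 = 1;			// CNS 01h: Identify Controller
 *		if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
 *			perror("NVME_IOCTL_ADMIN_CMD");
 *		return 0;
 *	}
 */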

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		struct iov_iter *iter, unsigned int flags)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	bool has_metadata = meta_buffer && meta_len;
	struct bio *bio = NULL;
	int ret;

	if (!nvme_ctrl_sgl_supported(ctrl))
		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
	if (has_metadata) {
		if (!supports_metadata)
			return -EINVAL;

		if (!nvme_ctrl_meta_sgl_supported(ctrl))
			dev_warn_once(ctrl->device,
				      "using unchecked metadata buffer\n");
	}

	if (iter)
		ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
	else
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
				0, rq_data_dir(req));

	if (ret)
		return ret;

	bio = req->bio;
	if (bdev)
		bio_set_dev(bio, bdev);

	if (has_metadata) {
		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
		if (ret)
			goto out_unmap;
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
	return ret;
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, NULL, flags);
		if (ret)
			goto out_free_req;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
	return ret;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->head->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    (ns->head->ms == ns->head->pi_size)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->head->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->head->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.lbat = cpu_to_le16(io.apptag);
	c.rw.lbatm = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
			meta_len, NULL, 0, 0);
}
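
/*
 * Illustration only, not driver code: a minimal userspace sketch that
 * drives the NVME_IOCTL_SUBMIT_IO path above.  It assumes the namespace
 * is formatted without metadata; note that nblocks is 0's based, so 0
 * requests a single logical block.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/nvme_ioctl.h>
 *
 *	static char buf[4096];			// covers 512B and 4KiB LBAs
 *
 *	int main(void)
 *	{
 *		struct nvme_user_io io;
 *		int fd = open("/dev/nvme0n1", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&io, 0, sizeof(io));
 *		io.opcode = 0x02;		// nvme_cmd_read
 *		io.slba = 0;			// first logical block
 *		io.nblocks = 0;			// one block (0's based)
 *		io.addr = (unsigned long)buf;
 *		if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
 *			perror("NVME_IOCTL_SUBMIT_IO");
 *		return 0;
 *	}
 */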

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, &result, timeout, 0);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, &cmd.result, timeout, flags);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	struct request *req;
	struct bio *bio;
	u64 result;
	int status;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu);
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);
	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}

static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
		pdu->status = -EINTR;
	} else {
		pdu->status = nvme_req(req)->status;
		if (!pdu->status)
			pdu->status = blk_status_to_errno(err);
	}
	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * For iopoll, complete it directly.  Note that using the uring_cmd
	 * helper for this is safe only because we check blk_rq_is_poll():
	 * since that returns false if we are NOT on a polled queue, it is
	 * safe to use the polled completion helper here.
	 *
	 * Otherwise, move the completion to task work.
	 */
	if (blk_rq_is_poll(req)) {
		if (pdu->bio)
			blk_rq_unmap_user(pdu->bio);
		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
	} else {
		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
	}

	return RQ_END_IO_FREE;
}

static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct iov_iter iter;
	struct iov_iter *map_iter = NULL;
	struct request *req;
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
	blk_mq_req_flags_t blk_flags = 0;
	int ret;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
		return -EACCES;

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		/* fixedbufs is only for non-vectored io */
		if (vec)
			return -EINVAL;

		ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
			nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
			issue_flags);
		if (ret < 0)
			return ret;

		map_iter = &iter;
	}

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.data_len) {
		ret = nvme_map_user_request(req, d.addr, d.data_len,
			nvme_to_user_ptr(d.metadata), d.metadata_len,
			map_iter, vec);
		if (ret)
			goto out_free_req;
	}

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->req = req;
	req->end_io_data = ioucmd;
	req->end_io = nvme_uring_cmd_end_io;
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}
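
/*
 * Illustration only, not driver code: a userspace sketch of the uring
 * passthrough path above, assuming liburing is available, namespace 1
 * uses a 512-byte LBA format, and /dev/ng0n1 (the per-namespace character
 * device, not the block device) exists.  The ring must be created with
 * big SQEs and CQEs, which is exactly what nvme_uring_cmd_checks() below
 * enforces.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <liburing.h>
 *	#include <linux/nvme_ioctl.h>
 *
 *	static char buf[512];
 *
 *	int main(void)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *		struct nvme_uring_cmd *cmd;
 *		int fd = open("/dev/ng0n1", O_RDONLY);
 *
 *		if (fd < 0 || io_uring_queue_init(8, &ring,
 *				IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
 *			return 1;
 *		sqe = io_uring_get_sqe(&ring);
 *		memset(sqe, 0, 128);		// big SQE, clear all of it
 *		sqe->opcode = IORING_OP_URING_CMD;
 *		sqe->fd = fd;
 *		sqe->cmd_op = NVME_URING_CMD_IO;
 *		cmd = (struct nvme_uring_cmd *)sqe->cmd;
 *		cmd->opcode = 0x02;		// nvme_cmd_read
 *		cmd->nsid = 1;			// assumed namespace ID
 *		cmd->addr = (unsigned long)buf;
 *		cmd->data_len = 512;		// assumed 512-byte LBA format
 *		cmd->cdw12 = 0;			// one block (0's based)
 *		io_uring_submit(&ring);
 *		io_uring_wait_cqe(&ring, &cqe);
 *		return cqe->res < 0;
 *	}
 */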

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, unsigned int flags, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD_VEC:
		flags |= NVME_IOCTL_VEC;
		fallthrough;
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
				       open_for_write);
	default:
		return -ENOTTY;
	}
}
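
/*
 * Illustration only, not driver code: NVME_IOCTL_ID above returns the
 * namespace ID as a positive syscall return value rather than through a
 * pointer argument, hence the force_successful_syscall_return().  From
 * userspace, with fd open on e.g. /dev/nvme0n1:
 *
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 *
 *	if (nsid < 0)
 *		perror("NVME_IOCTL_ID");
 */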

int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
	bool open_for_write = file->f_mode & FMODE_WRITE;
	void __user *argp = (void __user *)arg;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;

	if (req && blk_rq_is_poll(req))
		return blk_rq_poll(req, iob, poll_flags);
	return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
		bool open_for_write)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
					       open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
		bool open_for_write)
{
	struct nvme_ns *ns;
	int ret, srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	if (!nvme_get_ns(ns)) {
		ret = -ENXIO;
		goto out_unlock;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);

	ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp, open_for_write);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}
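
/*
 * Illustration only, not driver code: the reset/rescan ioctls handled
 * above take no payload, so from a CAP_SYS_ADMIN process a bare ioctl on
 * the controller character device suffices:
 *
 *	int fd = open("/dev/nvme0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, NVME_IOCTL_RESET) < 0)
 *		perror("NVME_IOCTL_RESET");
 */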
881