xref: /linux/drivers/scsi/scsi_bsg.c (revision 9100a28c8bb4270744942cf834efcd80f1acda7d)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/bsg.h>
3 #include <linux/io_uring/cmd.h>
4 #include <scsi/scsi.h>
5 #include <scsi/scsi_ioctl.h>
6 #include <scsi/scsi_cmnd.h>
7 #include <scsi/scsi_device.h>
8 #include <scsi/sg.h>
9 #include "scsi_priv.h"
10 
11 #define uptr64(val) ((void __user *)(uintptr_t)(val))
12 
/*
 * Per-command BSG SCSI PDU stored in io_uring_cmd.pdu[32].
 * Holds temporary state between submission, completion and task_work.
 * All fields are written at submission time (scsi_bsg_uring_cmd) and
 * consumed in the completion task work (scsi_bsg_uring_task_cb).
 */
struct scsi_bsg_uring_cmd_pdu {
	struct bio *bio;		/* mapped user buffer, unmap in task work */
	struct request *req;		/* block request, freed in task work */
	u64 response_addr;		/* user space response buffer address */
};
/* The PDU must fit in the space io_uring reserves inline per command. */
static_assert(sizeof(struct scsi_bsg_uring_cmd_pdu) <= sizeof_field(struct io_uring_cmd, pdu));
23 
/* Return the per-command PDU embedded in @ioucmd (see struct above). */
static inline struct scsi_bsg_uring_cmd_pdu *scsi_bsg_uring_cmd_pdu(
	struct io_uring_cmd *ioucmd)
{
	return io_uring_cmd_to_pdu(ioucmd, struct scsi_bsg_uring_cmd_pdu);
}
29 
/*
 * Completion task work for a BSG io_uring command.
 *
 * Unmaps the user data buffer, copies sense data to the user response
 * buffer if the command hit a CHECK CONDITION, builds the 32-bit res2
 * status word (layout in uapi/linux/bsg.h) and completes the io_uring
 * command.  Runs as task work so that copy_to_user() is permitted.
 */
static void scsi_bsg_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);
	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
	struct request *rq = pdu->req;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	u64 res2;
	int ret = 0;
	u8 driver_status = 0;
	u8 sense_len_wr = 0;

	/* Release the user pages mapped at submission, if there were any. */
	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);

	/* Sense data is only reported for CHECK CONDITION, and only when
	 * the submitter supplied a response buffer address. */
	if (scsi_status_is_check_condition(scmd->result)) {
		driver_status = DRIVER_SENSE;
		if (pdu->response_addr)
			sense_len_wr = min_t(u8, scmd->sense_len,
					     SCSI_SENSE_BUFFERSIZE);
	}

	if (sense_len_wr) {
		if (copy_to_user(uptr64(pdu->response_addr), scmd->sense_buffer,
				 sense_len_wr))
			ret = -EFAULT;
	}

	res2 = bsg_scsi_res2_build(status_byte(scmd->result), driver_status,
				  host_byte(scmd->result), sense_len_wr,
				  scmd->resid_len);

	/* scmd lives in the request PDU: don't touch it past this free. */
	blk_mq_free_request(rq);
	io_uring_cmd_done32(ioucmd, ret, res2,
			    IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
}
66 
67 static enum rq_end_io_ret scsi_bsg_uring_cmd_done(struct request *req,
68 						  blk_status_t status,
69 						  const struct io_comp_batch *iocb)
70 {
71 	struct io_uring_cmd *ioucmd = req->end_io_data;
72 
73 	io_uring_cmd_do_in_task_lazy(ioucmd, scsi_bsg_uring_task_cb);
74 	return RQ_END_IO_NONE;
75 }
76 
77 static int scsi_bsg_map_user_buffer(struct request *req,
78 				    struct io_uring_cmd *ioucmd,
79 				    unsigned int issue_flags, gfp_t gfp_mask)
80 {
81 	const struct bsg_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe, struct bsg_uring_cmd);
82 	bool is_write = cmd->dout_xfer_len > 0;
83 	u64 buf_addr = is_write ? cmd->dout_xferp : cmd->din_xferp;
84 	unsigned long buf_len = is_write ? cmd->dout_xfer_len : cmd->din_xfer_len;
85 	struct iov_iter iter;
86 	int ret;
87 
88 	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
89 		ret = io_uring_cmd_import_fixed(buf_addr, buf_len,
90 						is_write ? WRITE : READ,
91 						&iter, ioucmd, issue_flags);
92 		if (ret < 0)
93 			return ret;
94 		ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
95 	} else {
96 		ret = blk_rq_map_user(req->q, req, NULL, uptr64(buf_addr),
97 				      buf_len, gfp_mask);
98 	}
99 
100 	return ret;
101 }
102 
/*
 * scsi_bsg_uring_cmd - submit a BSG SCSI passthrough command via io_uring
 * @q:		request queue of the target SCSI device
 * @ioucmd:	io_uring command; its 128-byte SQE carries a struct bsg_uring_cmd
 * @issue_flags: io_uring issue flags; IO_URING_F_NONBLOCK selects nowait
 *		allocation behavior
 * @open_for_write: whether the bsg node was opened writable, used by the
 *		scsi_cmd_allowed() permission check
 *
 * Validates the sg_io_v4-style fields in the SQE, builds a passthrough
 * request, maps the user data buffer (if any) and queues the request
 * asynchronously.  Completion is handled in task work
 * (scsi_bsg_uring_task_cb), which unmaps the buffer, frees the request
 * and posts the CQE.
 *
 * Returns -EIOCBQUEUED when the command was queued, otherwise a
 * negative errno and nothing was submitted.
 */
static int scsi_bsg_uring_cmd(struct request_queue *q, struct io_uring_cmd *ioucmd,
			       unsigned int issue_flags, bool open_for_write)
{
	struct scsi_bsg_uring_cmd_pdu *pdu = scsi_bsg_uring_cmd_pdu(ioucmd);
	const struct bsg_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe, struct bsg_uring_cmd);
	struct scsi_cmnd *scmd;
	struct request *req;
	blk_mq_req_flags_t blk_flags = 0;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	if (cmd->protocol != BSG_PROTOCOL_SCSI ||
	    cmd->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;

	/* A CDB pointer and a non-zero CDB length are mandatory. */
	if (!cmd->request || cmd->request_len == 0)
		return -EINVAL;

	if (cmd->dout_xfer_len && cmd->din_xfer_len) {
		pr_warn_once("BIDI support in bsg has been removed.\n");
		return -EOPNOTSUPP;
	}

	/* iovec-based transfers are not supported on the uring path. */
	if (cmd->dout_iovec_count > 0 || cmd->din_iovec_count > 0)
		return -EOPNOTSUPP;

	/* Nonblocking submission: don't sleep for a tag or for memory. */
	if (issue_flags & IO_URING_F_NONBLOCK) {
		blk_flags = BLK_MQ_REQ_NOWAIT;
		gfp_mask = GFP_NOWAIT;
	}

	req = scsi_alloc_request(q, cmd->dout_xfer_len ?
				 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmd_len = cmd->request_len;
	/* Reject CDBs that don't fit the fixed command buffer. */
	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
		ret = -EINVAL;
		goto out_free_req;
	}
	scmd->allowed = SG_DEFAULT_RETRIES;

	if (copy_from_user(scmd->cmnd, uptr64(cmd->request), cmd->request_len)) {
		ret = -EFAULT;
		goto out_free_req;
	}

	/* Read-only/unprivileged openers may only issue safe CDBs. */
	if (!scsi_cmd_allowed(scmd->cmnd, open_for_write)) {
		ret = -EPERM;
		goto out_free_req;
	}

	/* Stashed for the completion task work's sense copy_to_user(). */
	pdu->response_addr = cmd->response;
	/* Clamp the requested sense length to the kernel sense buffer. */
	scmd->sense_len = cmd->max_response_len ?
		min(cmd->max_response_len, SCSI_SENSE_BUFFERSIZE) : SCSI_SENSE_BUFFERSIZE;

	if (cmd->dout_xfer_len || cmd->din_xfer_len) {
		ret = scsi_bsg_map_user_buffer(req, ioucmd, issue_flags, gfp_mask);
		if (ret)
			goto out_free_req;
		/* Remember the bio so task work can blk_rq_unmap_user() it. */
		pdu->bio = req->bio;
	} else {
		pdu->bio = NULL;
	}

	req->timeout = cmd->timeout_ms ?
		msecs_to_jiffies(cmd->timeout_ms) : BLK_DEFAULT_SG_TIMEOUT;

	req->end_io = scsi_bsg_uring_cmd_done;
	req->end_io_data = ioucmd;
	pdu->req = req;

	blk_execute_rq_nowait(req, false);
	/* The CQE is posted later from task work. */
	return -EIOCBQUEUED;

out_free_req:
	blk_mq_free_request(req);
	return ret;
}
184 
/*
 * scsi_bsg_sg_io_fn - synchronous sg_io_v4 handler for the bsg device
 * @q:		request queue of the target SCSI device
 * @hdr:	kernel copy of the user's struct sg_io_v4; output members
 *		(status, info, residuals, response_len) are filled in here
 * @open_for_write: whether the bsg node was opened writable, used by the
 *		scsi_cmd_allowed() permission check
 * @timeout:	request timeout in jiffies
 *
 * Builds a passthrough request from @hdr, executes it synchronously and
 * translates the SCSI result back into @hdr.  Returns 0 when the request
 * was executed (even if the SCSI command itself failed -- callers check
 * hdr->info / hdr->device_status), or a negative errno on submission or
 * copy failures.
 */
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		bool open_for_write, unsigned int timeout)
{
	struct scsi_cmnd *scmd;
	struct request *rq;
	struct bio *bio;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		pr_warn_once("BIDI support in bsg has been removed.\n");
		return -EOPNOTSUPP;
	}

	rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
				REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	scmd = blk_mq_rq_to_pdu(rq);
	scmd->cmd_len = hdr->request_len;
	/* Reject CDBs that don't fit the fixed command buffer. */
	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
		ret = -EINVAL;
		goto out_put_request;
	}

	ret = -EFAULT;
	if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len))
		goto out_put_request;
	ret = -EPERM;
	/* Read-only/unprivileged openers may only issue safe CDBs. */
	if (!scsi_cmd_allowed(scmd->cmnd, open_for_write))
		goto out_put_request;

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_put_request;

	/* Save the bio so the user buffer can be unmapped after execution. */
	bio = rq->bio;
	blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * fill in all the output members
	 */
	hdr->device_status = scmd->result & 0xff;
	hdr->transport_status = host_byte(scmd->result);
	hdr->driver_status = 0;
	if (scsi_status_is_check_condition(scmd->result))
		hdr->driver_status = DRIVER_SENSE;
	hdr->info = 0;
	/* SG_INFO_CHECK tells userspace that some status byte is non-OK. */
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (scmd->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				scmd->sense_len);

		if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer,
				 len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* Report the untransferred byte count for the direction used. */
	if (rq_data_dir(rq) == READ)
		hdr->din_resid = scmd->resid_len;
	else
		hdr->dout_resid = scmd->resid_len;

	blk_rq_unmap_user(bio);

out_put_request:
	blk_mq_free_request(rq);
	return ret;
}
271 
272 struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
273 {
274 	return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
275 			dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn,
276 			scsi_bsg_uring_cmd);
277 }
278