xref: /linux/drivers/vhost/scsi.c (revision 821c9e515db512904250e1d460109a1dc4c7ef6b)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*******************************************************************************
3  * Vhost kernel TCM fabric driver for virtio SCSI initiators
4  *
5  * (C) Copyright 2010-2013 Datera, Inc.
6  * (C) Copyright 2010-2012 IBM Corp.
7  *
8  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
9  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10  ****************************************************************************/
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <generated/utsrelease.h>
15 #include <linux/utsname.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/kthread.h>
19 #include <linux/types.h>
20 #include <linux/string.h>
21 #include <linux/configfs.h>
22 #include <linux/ctype.h>
23 #include <linux/compat.h>
24 #include <linux/eventfd.h>
25 #include <linux/fs.h>
26 #include <linux/vmalloc.h>
27 #include <linux/miscdevice.h>
28 #include <linux/blk_types.h>
29 #include <linux/bio.h>
30 #include <linux/unaligned.h>
31 #include <scsi/scsi_common.h>
32 #include <scsi/scsi_proto.h>
33 #include <target/target_core_base.h>
34 #include <target/target_core_fabric.h>
35 #include <linux/vhost.h>
36 #include <linux/virtio_scsi.h>
37 #include <linux/llist.h>
38 #include <linux/bitmap.h>
39 
40 #include "vhost.h"
41 
42 #define VHOST_SCSI_VERSION  "v0.1"
43 #define VHOST_SCSI_NAMELEN 256
44 #define VHOST_SCSI_MAX_CDB_SIZE 32
45 #define VHOST_SCSI_PREALLOC_SGLS 2048
46 #define VHOST_SCSI_PREALLOC_UPAGES 2048
47 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
48 /*
49  * For the legacy descriptor case we allocate an iov per byte in the
50  * virtio_scsi_cmd_resp struct.
51  */
52 #define VHOST_SCSI_MAX_RESP_IOVS sizeof(struct virtio_scsi_cmd_resp)
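/*
 * Illustration (assuming the standard virtio 1.x response layout of
 * sense_len, resid, status_qualifier, status, response plus a 96-byte
 * sense buffer, i.e. 108 bytes): a legacy guest may therefore hand us
 * up to 108 single-byte iovecs for one response.
 */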
53 
54 static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;
55 
56 #ifdef CONFIG_ARCH_NO_SG_CHAIN
57 static int vhost_scsi_set_inline_sg_cnt(const char *buf,
58 					const struct kernel_param *kp)
59 {
60 	pr_err("Setting inline_sg_cnt is not supported.\n");
61 	return -EOPNOTSUPP;
62 }
63 #else
64 static int vhost_scsi_set_inline_sg_cnt(const char *buf,
65 					const struct kernel_param *kp)
66 {
67 	unsigned int cnt;
68 	int ret;
69 
70 	ret = kstrtouint(buf, 10, &cnt);
71 	if (ret)
72 		return ret;
73 
74 	if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
75 		pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
76 		return -EINVAL;
77 	}
78 
79 	vhost_scsi_inline_sg_cnt = cnt;
80 	return 0;
81 }
82 #endif
83 
84 static int vhost_scsi_get_inline_sg_cnt(char *buf,
85 					const struct kernel_param *kp)
86 {
87 	return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
88 }
89 
90 static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
91 	.get = vhost_scsi_get_inline_sg_cnt,
92 	.set = vhost_scsi_set_inline_sg_cnt,
93 };
94 
95 module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
96 MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
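/*
 * Example usage (assuming the module is loaded as vhost_scsi; the sysfs
 * path follows from the 0644 module_param_cb() above):
 *
 *	modprobe vhost_scsi inline_sg_cnt=512
 *	echo 512 > /sys/module/vhost_scsi/parameters/inline_sg_cnt
 */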
97 
98 /* Max number of requests before requeueing the job.
99  * Using this limit prevents one virtqueue from starving others with
100  * requests.
101  */
102 #define VHOST_SCSI_WEIGHT 256
103 
104 struct vhost_scsi_inflight {
105 	/* Wait for the flush operation to finish */
106 	struct completion comp;
107 	/* Refcount for the inflight reqs */
108 	struct kref kref;
109 };
110 
111 struct vhost_scsi_cmd {
112 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
113 	int tvc_vq_desc;
114 	/* The number of scatterlists associated with this cmd */
115 	u32 tvc_sgl_count;
116 	u32 tvc_prot_sgl_count;
117 	u32 copied_iov:1;
118 	const void *read_iov;
119 	struct iov_iter *read_iter;
120 	struct scatterlist *sgl;
121 	struct sg_table table;
122 	struct scatterlist *prot_sgl;
123 	struct sg_table prot_table;
124 	/* Fast path response header iovec used when only one vec is needed */
125 	struct iovec tvc_resp_iov;
126 	/* Number of iovs for response */
127 	unsigned int tvc_resp_iovs_cnt;
128 	/* Pointer to response header iovecs if more than one is needed */
129 	struct iovec *tvc_resp_iovs;
130 	/* Pointer to vhost_virtqueue for the cmd */
131 	struct vhost_virtqueue *tvc_vq;
132 	/* The TCM I/O descriptor that is accessed via container_of() */
133 	struct se_cmd tvc_se_cmd;
134 	/* Sense buffer that will be mapped into outgoing status */
135 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
136 	/*
137 	 * Dirty write descriptors of this command.
138 	 */
139 	struct vhost_log *tvc_log;
140 	unsigned int tvc_log_num;
141 	/* Completed commands list, serviced from vhost worker thread */
142 	struct llist_node tvc_completion_list;
143 	/* Used to track inflight cmd */
144 	struct vhost_scsi_inflight *inflight;
145 };
146 
147 struct vhost_scsi_nexus {
148 	/* Pointer to TCM session for I_T Nexus */
149 	struct se_session *tvn_se_sess;
150 };
151 
152 struct vhost_scsi_tpg {
153 	/* Vhost port target portal group tag for TCM */
154 	u16 tport_tpgt;
155 	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
156 	int tv_tpg_port_count;
157 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
158 	int tv_tpg_vhost_count;
159 	/* Used for enabling T10-PI with legacy devices */
160 	int tv_fabric_prot_type;
161 	/* list for vhost_scsi_list */
162 	struct list_head tv_tpg_list;
163 	/* Used to protect access for tpg_nexus */
164 	struct mutex tv_tpg_mutex;
165 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
166 	struct vhost_scsi_nexus *tpg_nexus;
167 	/* Pointer back to vhost_scsi_tport */
168 	struct vhost_scsi_tport *tport;
169 	/* Returned by vhost_scsi_make_tpg() */
170 	struct se_portal_group se_tpg;
171 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
172 	struct vhost_scsi *vhost_scsi;
173 };
174 
175 struct vhost_scsi_tport {
176 	/* SCSI protocol the tport is providing */
177 	u8 tport_proto_id;
178 	/* Binary World Wide unique Port Name for Vhost Target port */
179 	u64 tport_wwpn;
180 	/* ASCII formatted WWPN for Vhost Target port */
181 	char tport_name[VHOST_SCSI_NAMELEN];
182 	/* Returned by vhost_scsi_make_tport() */
183 	struct se_wwn tport_wwn;
184 };
185 
186 struct vhost_scsi_evt {
187 	/* event to be sent to guest */
188 	struct virtio_scsi_event event;
189 	/* event list, serviced from vhost worker thread */
190 	struct llist_node list;
191 };
192 
193 enum {
194 	VHOST_SCSI_VQ_CTL = 0,
195 	VHOST_SCSI_VQ_EVT = 1,
196 	VHOST_SCSI_VQ_IO = 2,
197 };
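/*
 * The virtqueue layout is fixed: index 0 is the control queue, index 1 the
 * event queue, and I/O queues start at index 2, so a device with N I/O
 * queues exposes N + 2 virtqueues in total.
 */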
198 
199 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
200 enum {
201 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
202 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
203 };
204 
205 #define VHOST_SCSI_MAX_TARGET	256
206 #define VHOST_SCSI_MAX_IO_VQ	1024
207 #define VHOST_SCSI_MAX_EVENT	128
208 
209 static unsigned vhost_scsi_max_io_vqs = 128;
210 module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
211 MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
212 
213 struct vhost_scsi_virtqueue {
214 	struct vhost_virtqueue vq;
215 	struct vhost_scsi *vs;
216 	/*
217 	 * Reference counting for inflight reqs, used for the flush operation.
218 	 * At any time one slot tracks newly submitted commands, while we
219 	 * wait for the other slot's count to reach 0.
220 	 */
221 	struct vhost_scsi_inflight inflights[2];
222 	/*
223 	 * Indicate current inflight in use, protected by vq->mutex.
224 	 * Writers must also take dev mutex and flush under it.
225 	 */
226 	int inflight_idx;
227 	struct vhost_scsi_cmd *scsi_cmds;
228 	struct sbitmap scsi_tags;
229 	int max_cmds;
230 	struct page **upages;
231 
232 	struct vhost_work completion_work;
233 	struct llist_head completion_list;
234 };
235 
236 struct vhost_scsi {
237 	/* Protected by vhost_scsi->dev.mutex */
238 	struct vhost_scsi_tpg **vs_tpg;
239 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
240 
241 	struct vhost_dev dev;
242 	struct vhost_scsi_virtqueue *vqs;
243 	struct vhost_scsi_inflight **old_inflight;
244 
245 	struct vhost_work vs_event_work; /* evt injection work item */
246 	struct llist_head vs_event_list; /* evt injection queue */
247 
248 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
249 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
250 
251 	unsigned int inline_sg_cnt;
252 };
253 
254 struct vhost_scsi_tmf {
255 	struct vhost_work vwork;
256 	struct work_struct flush_work;
257 	struct vhost_scsi *vhost;
258 	struct vhost_scsi_virtqueue *svq;
259 
260 	struct se_cmd se_cmd;
261 	u8 scsi_resp;
262 	struct vhost_scsi_inflight *inflight;
263 	struct iovec resp_iov;
264 	int in_iovs;
265 	int vq_desc;
266 
267 	/*
268 	 * Dirty write descriptors of this command.
269 	 */
270 	struct vhost_log *tmf_log;
271 	unsigned int tmf_log_num;
272 };
273 
274 /*
275  * Context for processing request and control queue operations.
276  */
277 struct vhost_scsi_ctx {
278 	int head;
279 	unsigned int out, in;
280 	size_t req_size, rsp_size;
281 	size_t out_size, in_size;
282 	u8 *target, *lunp;
283 	void *req;
284 	struct iov_iter out_iter;
285 };
286 
287 /*
288  * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
289  * configfs management operations.
290  */
291 static DEFINE_MUTEX(vhost_scsi_mutex);
292 static LIST_HEAD(vhost_scsi_list);
293 
294 static void vhost_scsi_done_inflight(struct kref *kref)
295 {
296 	struct vhost_scsi_inflight *inflight;
297 
298 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
299 	complete(&inflight->comp);
300 }
301 
302 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
303 				    struct vhost_scsi_inflight *old_inflight[])
304 {
305 	struct vhost_scsi_inflight *new_inflight;
306 	struct vhost_virtqueue *vq;
307 	int idx, i;
308 
309 	for (i = 0; i < vs->dev.nvqs;  i++) {
310 		vq = &vs->vqs[i].vq;
311 
312 		mutex_lock(&vq->mutex);
313 
314 		/* store old inflight */
315 		idx = vs->vqs[i].inflight_idx;
316 		if (old_inflight)
317 			old_inflight[i] = &vs->vqs[i].inflights[idx];
318 
319 		/* setup new inflight */
320 		vs->vqs[i].inflight_idx = idx ^ 1;
321 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
322 		kref_init(&new_inflight->kref);
323 		init_completion(&new_inflight->comp);
324 
325 		mutex_unlock(&vq->mutex);
326 	}
327 }
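/*
 * Sketch of the flush handshake built on the two inflight slots (the flush
 * caller lives outside this excerpt, so the sequence below is an
 * illustration, not a quote):
 *
 *	vhost_scsi_init_inflight(vs, vs->old_inflight);	// flip active slot
 *	for each vq i:
 *		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 *		wait_for_completion(&old_inflight[i]->comp);
 *
 * New commands take a reference on the freshly initialized slot, so the old
 * slot's refcount can only reach zero once every command issued before the
 * flip has called vhost_scsi_put_inflight().
 */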
328 
329 static struct vhost_scsi_inflight *
330 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
331 {
332 	struct vhost_scsi_inflight *inflight;
333 	struct vhost_scsi_virtqueue *svq;
334 
335 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
336 	inflight = &svq->inflights[svq->inflight_idx];
337 	kref_get(&inflight->kref);
338 
339 	return inflight;
340 }
341 
342 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
343 {
344 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
345 }
346 
347 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
348 {
349 	return 1;
350 }
351 
352 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
353 {
354 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
355 				struct vhost_scsi_tpg, se_tpg);
356 	struct vhost_scsi_tport *tport = tpg->tport;
357 
358 	return &tport->tport_name[0];
359 }
360 
361 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
362 {
363 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
364 				struct vhost_scsi_tpg, se_tpg);
365 	return tpg->tport_tpgt;
366 }
367 
368 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
369 {
370 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
371 				struct vhost_scsi_tpg, se_tpg);
372 
373 	return tpg->tv_fabric_prot_type;
374 }
375 
376 static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
377 				   struct vhost_scsi_cmd *cmd,
378 				   struct vhost_log *log,
379 				   unsigned int log_num)
380 {
381 	if (!cmd->tvc_log)
382 		cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
383 					     sizeof(*cmd->tvc_log),
384 					     GFP_KERNEL);
385 
386 	if (unlikely(!cmd->tvc_log)) {
387 		vq_err(vq, "Failed to alloc tvc_log\n");
388 		return -ENOMEM;
389 	}
390 
391 	memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
392 	cmd->tvc_log_num = log_num;
393 
394 	return 0;
395 }
396 
397 static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
398 				 struct vhost_log *log,
399 				 unsigned int log_num)
400 {
401 	if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
402 		return;
403 
404 	if (likely(!log_num || !log))
405 		return;
406 
407 	/*
408 	 * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
409 	 * No requirement for vq->iotlb case.
410 	 */
411 	WARN_ON_ONCE(unlikely(vq->iotlb));
412 	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
413 }
414 
415 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
416 {
417 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
418 				struct vhost_scsi_cmd, tvc_se_cmd);
419 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
420 				struct vhost_scsi_virtqueue, vq);
421 	struct vhost_scsi *vs = svq->vs;
422 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
423 	struct scatterlist *sg;
424 	struct page *page;
425 	int i;
426 
427 	if (tv_cmd->tvc_sgl_count) {
428 		for_each_sgtable_sg(&tv_cmd->table, sg, i) {
429 			page = sg_page(sg);
430 			if (!page)
431 				continue;
432 
433 			if (tv_cmd->copied_iov)
434 				__free_page(page);
435 			else
436 				put_page(page);
437 		}
438 		kfree(tv_cmd->read_iter);
439 		kfree(tv_cmd->read_iov);
440 		sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
441 	}
442 	if (tv_cmd->tvc_prot_sgl_count) {
443 		for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
444 			page = sg_page(sg);
445 			if (page)
446 				put_page(page);
447 		}
448 		sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
449 	}
450 
451 	if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
452 		kfree(tv_cmd->tvc_resp_iovs);
453 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
454 	vhost_scsi_put_inflight(inflight);
455 }
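/*
 * Page ownership: data pages come from one of two paths. On the bounce-copy
 * path copied_iov is set and the pages were allocated with alloc_page(), so
 * they are returned with __free_page(); on the zero-copy path they are guest
 * pages pinned by iov_iter_get_pages2() and must be released with put_page().
 */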
456 
457 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
458 {
459 	struct vhost_scsi_inflight *inflight = tmf->inflight;
460 
461 	/*
462 	 * tmf->tmf_log stays NULL unless VHOST_F_LOG_ALL is set.
463 	 */
464 	kfree(tmf->tmf_log);
465 	kfree(tmf);
466 	vhost_scsi_put_inflight(inflight);
467 }
468 
469 static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
470 {
471 	struct vhost_scsi_cmd *cmd, *t;
472 	struct llist_node *llnode;
473 
474 	llnode = llist_del_all(&svq->completion_list);
475 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
476 		vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
477 }
478 
479 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
480 {
481 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
482 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
483 					struct vhost_scsi_tmf, se_cmd);
484 
485 		schedule_work(&tmf->flush_work);
486 	} else {
487 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
488 					struct vhost_scsi_cmd, tvc_se_cmd);
489 		struct vhost_scsi_virtqueue *svq =  container_of(cmd->tvc_vq,
490 					struct vhost_scsi_virtqueue, vq);
491 
492 		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
493 		if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
494 			vhost_scsi_drop_cmds(svq);
495 	}
496 }
497 
498 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
499 {
500 	/* Go ahead and process the write immediately */
501 	target_execute_cmd(se_cmd);
502 	return 0;
503 }
504 
505 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
506 {
507 	transport_generic_free_cmd(se_cmd, 0);
508 	return 0;
509 }
510 
511 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
512 {
513 	transport_generic_free_cmd(se_cmd, 0);
514 	return 0;
515 }
516 
517 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
518 {
519 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
520 						  se_cmd);
521 
522 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
523 	transport_generic_free_cmd(&tmf->se_cmd, 0);
524 }
525 
526 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
527 {
528 	return;
529 }
530 
531 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
532 {
533 	vs->vs_events_nr--;
534 	kfree(evt);
535 }
536 
537 static struct vhost_scsi_evt *
538 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
539 		       u32 event, u32 reason)
540 {
541 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
542 	struct vhost_scsi_evt *evt;
543 
544 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
545 		vs->vs_events_missed = true;
546 		return NULL;
547 	}
548 
549 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
550 	if (!evt) {
551 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
552 		vs->vs_events_missed = true;
553 		return NULL;
554 	}
555 
556 	evt->event.event = cpu_to_vhost32(vq, event);
557 	evt->event.reason = cpu_to_vhost32(vq, reason);
558 	vs->vs_events_nr++;
559 
560 	return evt;
561 }
562 
563 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
564 {
565 	return target_put_sess_cmd(se_cmd);
566 }
567 
568 static void
569 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
570 {
571 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
572 	struct virtio_scsi_event *event = &evt->event;
573 	struct virtio_scsi_event __user *eventp;
574 	struct vhost_log *vq_log;
575 	unsigned int log_num;
576 	unsigned out, in;
577 	int head, ret;
578 
579 	if (!vhost_vq_get_backend(vq)) {
580 		vs->vs_events_missed = true;
581 		return;
582 	}
583 
584 again:
585 	vhost_disable_notify(&vs->dev, vq);
586 
587 	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
588 		vq->log : NULL;
589 
590 	/*
591 	 * Reset 'log_num' here, since vhost_get_vq_desc() only resets it
592 	 * after certain condition checks have passed.
593 	 */
594 	log_num = 0;
595 
596 	head = vhost_get_vq_desc(vq, vq->iov,
597 			ARRAY_SIZE(vq->iov), &out, &in,
598 			vq_log, &log_num);
599 	if (head < 0) {
600 		vs->vs_events_missed = true;
601 		return;
602 	}
603 	if (head == vq->num) {
604 		if (vhost_enable_notify(&vs->dev, vq))
605 			goto again;
606 		vs->vs_events_missed = true;
607 		return;
608 	}
609 
610 	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
611 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
612 				vq->iov[out].iov_len);
613 		vs->vs_events_missed = true;
614 		return;
615 	}
616 
617 	if (vs->vs_events_missed) {
618 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
619 		vs->vs_events_missed = false;
620 	}
621 
622 	eventp = vq->iov[out].iov_base;
623 	ret = __copy_to_user(eventp, event, sizeof(*event));
624 	if (!ret)
625 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
626 	else
627 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
628 
629 	vhost_scsi_log_write(vq, vq_log, log_num);
630 }
631 
632 static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
633 {
634 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
635 	struct vhost_scsi_evt *evt, *t;
636 	struct llist_node *llnode;
637 
638 	mutex_lock(&vq->mutex);
639 	llnode = llist_del_all(&vs->vs_event_list);
640 	llist_for_each_entry_safe(evt, t, llnode, list) {
641 		if (!drop)
642 			vhost_scsi_do_evt_work(vs, evt);
643 		vhost_scsi_free_evt(vs, evt);
644 	}
645 	mutex_unlock(&vq->mutex);
646 }
647 
648 static void vhost_scsi_evt_work(struct vhost_work *work)
649 {
650 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
651 					     vs_event_work);
652 	vhost_scsi_complete_events(vs, false);
653 }
654 
655 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
656 {
657 	struct iov_iter *iter = cmd->read_iter;
658 	struct scatterlist *sg;
659 	struct page *page;
660 	size_t len;
661 	int i;
662 
663 	for_each_sgtable_sg(&cmd->table, sg, i) {
664 		page = sg_page(sg);
665 		if (!page)
666 			continue;
667 
668 		len = sg->length;
669 
670 		if (copy_page_to_iter(page, 0, len, iter) != len) {
671 			pr_err("Could not copy %zu bytes while handling misaligned cmd\n",
672 			       len);
673 			return -1;
674 		}
675 	}
676 
677 	return 0;
678 }
679 
680 /* Fill in status and signal that we are done processing this command
681  *
682  * This is scheduled in the vhost work queue so we are called with the owner
683  * process mm and can access the vring.
684  */
685 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
686 {
687 	struct vhost_scsi_virtqueue *svq = container_of(work,
688 				struct vhost_scsi_virtqueue, completion_work);
689 	struct virtio_scsi_cmd_resp v_rsp;
690 	struct vhost_scsi_cmd *cmd, *t;
691 	struct llist_node *llnode;
692 	struct se_cmd *se_cmd;
693 	struct iov_iter iov_iter;
694 	bool signal = false;
695 	int ret;
696 
697 	llnode = llist_del_all(&svq->completion_list);
698 
699 	mutex_lock(&svq->vq.mutex);
700 
701 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
702 		se_cmd = &cmd->tvc_se_cmd;
703 
704 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
705 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
706 		memset(&v_rsp, 0, sizeof(v_rsp));
707 
708 		if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
709 			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
710 		} else {
711 			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
712 						     se_cmd->residual_count);
713 			/* TODO is status_qualifier field needed? */
714 			v_rsp.status = se_cmd->scsi_status;
715 			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
716 							 se_cmd->scsi_sense_length);
717 			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
718 			       se_cmd->scsi_sense_length);
719 		}
720 
721 		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
722 			      cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
723 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
724 		if (likely(ret == sizeof(v_rsp))) {
725 			signal = true;
726 
727 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
728 		} else
729 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
730 
731 		vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
732 				     cmd->tvc_log_num);
733 
734 		vhost_scsi_release_cmd_res(se_cmd);
735 	}
736 
737 	mutex_unlock(&svq->vq.mutex);
738 
739 	if (signal)
740 		vhost_signal(&svq->vs->dev, &svq->vq);
741 }
742 
743 static struct vhost_scsi_cmd *
744 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
745 {
746 	struct vhost_scsi_virtqueue *svq = container_of(vq,
747 					struct vhost_scsi_virtqueue, vq);
748 	struct vhost_scsi_cmd *cmd;
749 	struct scatterlist *sgl, *prot_sgl;
750 	struct vhost_log *log;
751 	int tag;
752 
753 	tag = sbitmap_get(&svq->scsi_tags);
754 	if (tag < 0) {
755 		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
756 		return ERR_PTR(-ENOMEM);
757 	}
758 
759 	cmd = &svq->scsi_cmds[tag];
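	/*
	 * The memset() below wipes the whole command, so stash the pointers
	 * to the preallocated inline sgl/prot_sgl arrays and the log buffer
	 * and restore them afterwards instead of reallocating per command.
	 */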
760 	sgl = cmd->sgl;
761 	prot_sgl = cmd->prot_sgl;
762 	log = cmd->tvc_log;
763 	memset(cmd, 0, sizeof(*cmd));
764 	cmd->sgl = sgl;
765 	cmd->prot_sgl = prot_sgl;
766 	cmd->tvc_log = log;
767 	cmd->tvc_se_cmd.map_tag = tag;
768 	cmd->inflight = vhost_scsi_get_inflight(vq);
769 
770 	return cmd;
771 }
772 
773 static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
774 					     struct scatterlist *curr,
775 					     struct scatterlist *end)
776 {
777 	size_t revert_bytes = 0;
778 	struct page *page;
779 
780 	while (curr != end) {
781 		page = sg_page(curr);
782 
783 		if (page) {
784 			put_page(page);
785 			revert_bytes += curr->length;
786 		}
787 		/* Clear so we can re-use it for the copy path */
788 		sg_set_page(curr, NULL, 0, 0);
789 		curr = sg_next(curr);
790 	}
791 	iov_iter_revert(iter, revert_bytes);
792 }
793 
794 /*
795  * Map a user memory range into a scatterlist
796  *
797  * Returns the number of scatterlist entries used or -errno on error.
798  */
799 static int
800 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
801 		      struct iov_iter *iter,
802 		      struct sg_table *sg_table,
803 		      struct scatterlist **sgl,
804 		      bool is_prot)
805 {
806 	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
807 					struct vhost_scsi_virtqueue, vq);
808 	struct page **pages = svq->upages;
809 	struct scatterlist *sg = *sgl;
810 	ssize_t bytes;
811 	size_t offset;
812 	unsigned int n, npages = 0;
813 
814 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
815 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
816 	/* No pages were pinned */
817 	if (bytes <= 0)
818 		return bytes < 0 ? bytes : -EFAULT;
819 
820 	while (bytes) {
821 		n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
822 		/*
823 		 * The block layer requires bios/requests to be a multiple of
824 		 * 512 bytes, but Windows can send us vecs that are misaligned.
825 		 * This can result in bios and later requests with misaligned
826 		 * sizes if we have to break up a cmd/scatterlist into multiple
827 		 * bios.
828 		 *
829 		 * We currently only break up a command into multiple bios if
830 		 * we hit the vec/seg limit, so check if our sgl_count is
831 		 * greater than the max and if a vec in the cmd has a
832 		 * misaligned offset/size.
833 		 */
834 		if (!is_prot &&
835 		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
836 		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
837 			WARN_ONCE(true,
838 				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
839 			goto revert_iter_get_pages;
840 		}
841 
842 		sg_set_page(sg, pages[npages++], n, offset);
843 		sg = sg_next(sg);
844 		bytes -= n;
845 		offset = 0;
846 	}
847 
848 	*sgl = sg;
849 	return npages;
850 
851 revert_iter_get_pages:
852 	vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);
853 
854 	iov_iter_revert(iter, bytes);
855 	while (bytes) {
856 		n = min_t(unsigned int, PAGE_SIZE, bytes);
857 
858 		put_page(pages[npages++]);
859 		bytes -= n;
860 	}
861 
862 	return -EINVAL;
863 }
864 
865 static int
866 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
867 {
868 	int sgl_count = 0;
869 
870 	if (!iter || !iter_iov(iter)) {
871 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
872 		       " present\n", __func__, bytes);
873 		return -EINVAL;
874 	}
875 
876 	sgl_count = iov_iter_npages(iter, 0xffff);
877 	if (sgl_count > max_sgls) {
878 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
879 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
880 		return -EINVAL;
881 	}
882 	return sgl_count;
883 }
884 
885 static int
886 vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
887 			   struct sg_table *sg_table, int sg_count,
888 			   int data_dir)
889 {
890 	size_t len = iov_iter_count(iter);
891 	unsigned int nbytes = 0;
892 	struct scatterlist *sg;
893 	struct page *page;
894 	int i, ret;
895 
896 	if (data_dir == DMA_FROM_DEVICE) {
897 		cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
898 		if (!cmd->read_iter)
899 			return -ENOMEM;
900 
901 		cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
902 		if (!cmd->read_iov) {
903 			ret = -ENOMEM;
904 			goto free_iter;
905 		}
906 	}
907 
908 	for_each_sgtable_sg(sg_table, sg, i) {
909 		page = alloc_page(GFP_KERNEL);
910 		if (!page) {
911 			ret = -ENOMEM;
912 			goto err;
913 		}
914 
915 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
916 		sg_set_page(sg, page, nbytes, 0);
917 
918 		if (data_dir == DMA_TO_DEVICE &&
919 		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
920 			ret = -EFAULT;
921 			goto err;
922 		}
923 
924 		len -= nbytes;
925 	}
926 
927 	cmd->copied_iov = 1;
928 	return 0;
929 
930 err:
931 	pr_err("Could not read %u bytes while handling misaligned cmd\n",
932 	       nbytes);
933 
934 	for_each_sgtable_sg(sg_table, sg, i) {
935 		page = sg_page(sg);
936 		if (page)
937 			__free_page(page);
938 	}
939 	kfree(cmd->read_iov);
940 free_iter:
941 	kfree(cmd->read_iter);
942 	return ret;
943 }
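/*
 * This is the bounce-buffer fallback for misaligned commands: WRITE payloads
 * are copied out of the guest iovecs into freshly allocated pages here, while
 * READ payloads are produced into the bounce pages by the backend and copied
 * back through the duplicated iterator (read_iter/read_iov) at completion
 * time by vhost_scsi_copy_sgl_to_iov().
 */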
944 
945 static int
946 vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
947 			  struct sg_table *sg_table, int sg_count, bool is_prot)
948 {
949 	struct scatterlist *sg = sg_table->sgl;
950 	int ret;
951 
952 	while (iov_iter_count(iter)) {
953 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
954 		if (ret < 0) {
955 			vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
956 							 sg);
957 			return ret;
958 		}
959 	}
960 
961 	return 0;
962 }
963 
964 static int
965 vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
966 		 size_t prot_bytes, struct iov_iter *prot_iter,
967 		 size_t data_bytes, struct iov_iter *data_iter, int data_dir)
968 {
969 	int sgl_count, ret;
970 
971 	if (prot_bytes) {
972 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
973 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
974 		cmd->prot_table.sgl = cmd->prot_sgl;
975 		ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
976 					     cmd->prot_table.sgl,
977 					     vs->inline_sg_cnt);
978 		if (ret)
979 			return ret;
980 
981 		cmd->tvc_prot_sgl_count = sgl_count;
982 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
983 			 cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);
984 
985 		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
986 						&cmd->prot_table,
987 						cmd->tvc_prot_sgl_count, true);
988 		if (ret < 0) {
989 			sg_free_table_chained(&cmd->prot_table,
990 					      vs->inline_sg_cnt);
991 			cmd->tvc_prot_sgl_count = 0;
992 			return ret;
993 		}
994 	}
995 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
996 					 VHOST_SCSI_PREALLOC_SGLS);
997 	if (sgl_count < 0)
998 		return sgl_count;
999 
1000 	cmd->table.sgl = cmd->sgl;
1001 	ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
1002 				     vs->inline_sg_cnt);
1003 	if (ret)
1004 		return ret;
1005 
1006 	cmd->tvc_sgl_count = sgl_count;
1007 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
1008 		  cmd->table.sgl, cmd->tvc_sgl_count);
1009 
1010 	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
1011 					cmd->tvc_sgl_count, false);
1012 	if (ret == -EINVAL)
1013 		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
1014 						 cmd->tvc_sgl_count, data_dir);
1015 	if (ret < 0) {
1016 		sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
1017 		cmd->tvc_sgl_count = 0;
1018 		return ret;
1019 	}
1020 	return 0;
1021 }
1022 
1023 static int vhost_scsi_to_tcm_attr(int attr)
1024 {
1025 	switch (attr) {
1026 	case VIRTIO_SCSI_S_SIMPLE:
1027 		return TCM_SIMPLE_TAG;
1028 	case VIRTIO_SCSI_S_ORDERED:
1029 		return TCM_ORDERED_TAG;
1030 	case VIRTIO_SCSI_S_HEAD:
1031 		return TCM_HEAD_TAG;
1032 	case VIRTIO_SCSI_S_ACA:
1033 		return TCM_ACA_TAG;
1034 	default:
1035 		break;
1036 	}
1037 	return TCM_SIMPLE_TAG;
1038 }
1039 
1040 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
1041 					struct vhost_scsi_cmd *cmd,
1042 					unsigned char *cdb, u16 lun,
1043 					int task_attr, int data_dir,
1044 					u32 exp_data_len)
1045 {
1046 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
1047 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
1048 
1049 	/* FIXME: BIDI operation */
1050 	if (cmd->tvc_sgl_count) {
1051 		sg_ptr = cmd->table.sgl;
1052 
1053 		if (cmd->tvc_prot_sgl_count)
1054 			sg_prot_ptr = cmd->prot_table.sgl;
1055 		else
1056 			se_cmd->prot_pto = true;
1057 	} else {
1058 		sg_ptr = NULL;
1059 	}
1060 
1061 	se_cmd->tag = 0;
1062 	target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
1063 			lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
1064 			data_dir, TARGET_SCF_ACK_KREF);
1065 
1066 	if (target_submit_prep(se_cmd, cdb, sg_ptr,
1067 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
1068 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
1069 		return;
1070 
1071 	target_submit(se_cmd);
1072 }
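/*
 * Submission follows the standard two-step LIO flow: target_init_cmd() and
 * target_submit_prep() attach the CDB and scatterlists, then target_submit()
 * hands the command to the target core. TARGET_SCF_ACK_KREF keeps a kref on
 * the command until vhost_scsi_check_stop_free() drops it via
 * target_put_sess_cmd(), which in turn drives vhost_scsi_release_cmd() and
 * the completion work above.
 */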
1073 
1074 static void
1075 vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1076 		       struct vhost_scsi_ctx *vc, u8 status)
1077 {
1078 	struct virtio_scsi_cmd_resp rsp;
1079 	struct iov_iter iov_iter;
1080 	int ret;
1081 
1082 	memset(&rsp, 0, sizeof(rsp));
1083 	rsp.status = status;
1084 
1085 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
1086 		      sizeof(rsp));
1087 
1088 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1089 
1090 	if (likely(ret == sizeof(rsp)))
1091 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1092 	else
1093 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
1094 }
1095 
1096 #define TYPE_IO_CMD    0
1097 #define TYPE_CTRL_TMF  1
1098 #define TYPE_CTRL_AN   2
1099 
1100 static void
1101 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
1102 			   struct vhost_virtqueue *vq,
1103 			   struct vhost_scsi_ctx *vc, int type)
1104 {
1105 	union {
1106 		struct virtio_scsi_cmd_resp cmd;
1107 		struct virtio_scsi_ctrl_tmf_resp tmf;
1108 		struct virtio_scsi_ctrl_an_resp an;
1109 	} rsp;
1110 	struct iov_iter iov_iter;
1111 	size_t rsp_size;
1112 	int ret;
1113 
1114 	memset(&rsp, 0, sizeof(rsp));
1115 
1116 	if (type == TYPE_IO_CMD) {
1117 		rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1118 		rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
1119 	} else if (type == TYPE_CTRL_TMF) {
1120 		rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1121 		rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
1122 	} else {
1123 		rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1124 		rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
1125 	}
1126 
1127 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
1128 		      rsp_size);
1129 
1130 	ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
1131 
1132 	if (likely(ret == rsp_size))
1133 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1134 	else
1135 		pr_err("Faulted on virtio scsi type=%d\n", type);
1136 }
1137 
1138 static int
1139 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1140 		    struct vhost_scsi_ctx *vc,
1141 		    struct vhost_log *log, unsigned int *log_num)
1142 {
1143 	int ret = -ENXIO;
1144 
1145 	if (likely(log_num))
1146 		*log_num = 0;
1147 
1148 	vc->head = vhost_get_vq_desc(vq, vq->iov,
1149 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
1150 				     log, log_num);
1151 
1152 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
1153 		 vc->head, vc->out, vc->in);
1154 
1155 	/* On error, stop handling until the next kick. */
1156 	if (unlikely(vc->head < 0))
1157 		goto done;
1158 
1159 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
1160 	if (vc->head == vq->num) {
1161 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
1162 			vhost_disable_notify(&vs->dev, vq);
1163 			ret = -EAGAIN;
1164 		}
1165 		goto done;
1166 	}
1167 
1168 	/*
1169 	 * Get the size of request and response buffers.
1170 	 * FIXME: Not correct for BIDI operation
1171 	 */
1172 	vc->out_size = iov_length(vq->iov, vc->out);
1173 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
1174 
1175 	/*
1176 	 * Copy over the virtio-scsi request header, which for an
1177 	 * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
1178 	 * single iovec may contain both the header + outgoing
1179 	 * WRITE payloads.
1180 	 *
1181 	 * copy_from_iter() will advance out_iter, so that it will
1182 	 * point at the start of the outgoing WRITE payload, if
1183 	 * DMA_TO_DEVICE is set.
1184 	 */
1185 	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
1186 	ret = 0;
1187 
1188 done:
1189 	return ret;
1190 }
1191 
1192 static int
1193 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
1194 {
1195 	if (unlikely(vc->in_size < vc->rsp_size)) {
1196 		vq_err(vq,
1197 		       "Response buf too small, need min %zu bytes got %zu",
1198 		       vc->rsp_size, vc->in_size);
1199 		return -EINVAL;
1200 	} else if (unlikely(vc->out_size < vc->req_size)) {
1201 		vq_err(vq,
1202 		       "Request buf too small, need min %zu bytes got %zu",
1203 		       vc->req_size, vc->out_size);
1204 		return -EIO;
1205 	}
1206 
1207 	return 0;
1208 }
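/*
 * The two error codes map onto the err: handling in the queue handlers: for
 * -EINVAL (response buffer too small) the request is dropped, since there is
 * nowhere to write a status, while -EIO (request buffer too small) still
 * lets the caller respond with VIRTIO_SCSI_S_BAD_TARGET.
 */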
1209 
1210 static int
1211 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
1212 		   struct vhost_scsi_tpg **tpgp)
1213 {
1214 	int ret = -EIO;
1215 
1216 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
1217 					  &vc->out_iter))) {
1218 		vq_err(vq, "Faulted on copy_from_iter_full\n");
1219 	} else if (unlikely(*vc->lunp != 1)) {
1220 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
1221 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
1222 	} else {
1223 		struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;
1224 
1225 		if (vc->target) {
1226 			/* validated at handler entry */
1227 			vs_tpg = vhost_vq_get_backend(vq);
1228 			tpg = READ_ONCE(vs_tpg[*vc->target]);
1229 			if (unlikely(!tpg))
1230 				goto out;
1231 		}
1232 
1233 		if (tpgp)
1234 			*tpgp = tpg;
1235 		ret = 0;
1236 	}
1237 out:
1238 	return ret;
1239 }
1240 
1241 static int
1242 vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
1243 			   unsigned int in_iovs_cnt)
1244 {
1245 	int i, cnt;
1246 
1247 	if (!in_iovs_cnt)
1248 		return 0;
1249 	/*
1250 	 * Initiators normally just put the virtio_scsi_cmd_resp in the first
1251 	 * iov, but in case they wedged some data in with it, we check that
1252 	 * the first iov is at least as large as the response struct.
1253 	 */
1254 	if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
1255 		cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
1256 		cmd->tvc_resp_iovs_cnt = 1;
1257 	} else {
1258 		/*
1259 		 * Legacy descriptor layouts didn't specify that we must put
1260 		 * the entire response in one iov. Worst case we have an
1261 		 * iov per byte.
1262 		 */
1263 		cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
1264 		cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
1265 					     GFP_KERNEL);
1266 		if (!cmd->tvc_resp_iovs)
1267 			return -ENOMEM;
1268 
1269 		cmd->tvc_resp_iovs_cnt = cnt;
1270 	}
1271 
1272 	for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
1273 		cmd->tvc_resp_iovs[i] = in_iovs[i];
1274 
1275 	return 0;
1276 }
1277 
1278 static u16 vhost_buf_to_lun(u8 *lun_buf)
1279 {
1280 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
1281 }
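/*
 * Worked example of the 8-byte virtio-scsi LUN format: byte 0 must be 1,
 * byte 1 is the target, and bytes 2-3 carry the flat-addressed LUN with
 * 0x4000 ORed in. For lun[] = { 1, 5, 0x40, 0x07, ... }:
 *
 *	((0x40 << 8) | 0x07) & 0x3FFF = 0x4007 & 0x3FFF = 7
 *
 * i.e. target 5, LUN 7.
 */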
1282 
1283 static void
1284 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1285 {
1286 	struct vhost_scsi_tpg **vs_tpg, *tpg;
1287 	struct virtio_scsi_cmd_req v_req;
1288 	struct virtio_scsi_cmd_req_pi v_req_pi;
1289 	struct vhost_scsi_nexus *nexus;
1290 	struct vhost_scsi_ctx vc;
1291 	struct vhost_scsi_cmd *cmd;
1292 	struct iov_iter in_iter, prot_iter, data_iter;
1293 	u64 tag;
1294 	u32 exp_data_len, data_direction;
1295 	int ret, prot_bytes, c = 0;
1296 	u16 lun;
1297 	u8 task_attr;
1298 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
1299 	u8 *cdb;
1300 	struct vhost_log *vq_log;
1301 	unsigned int log_num;
1302 
1303 	mutex_lock(&vq->mutex);
1304 	/*
1305 	 * We can handle the vq only after the endpoint is setup by calling the
1306 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1307 	 */
1308 	vs_tpg = vhost_vq_get_backend(vq);
1309 	if (!vs_tpg)
1310 		goto out;
1311 
1312 	memset(&vc, 0, sizeof(vc));
1313 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1314 
1315 	vhost_disable_notify(&vs->dev, vq);
1316 
1317 	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
1318 		vq->log : NULL;
1319 
1320 	do {
1321 		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
1322 		if (ret)
1323 			goto err;
1324 
1325 		/*
1326 		 * Setup pointers and values based upon different virtio-scsi
1327 		 * request header if T10_PI is enabled in KVM guest.
1328 		 */
1329 		if (t10_pi) {
1330 			vc.req = &v_req_pi;
1331 			vc.req_size = sizeof(v_req_pi);
1332 			vc.lunp = &v_req_pi.lun[0];
1333 			vc.target = &v_req_pi.lun[1];
1334 		} else {
1335 			vc.req = &v_req;
1336 			vc.req_size = sizeof(v_req);
1337 			vc.lunp = &v_req.lun[0];
1338 			vc.target = &v_req.lun[1];
1339 		}
1340 
1341 		/*
1342 		 * Validate the size of request and response buffers.
1343 		 * Check for a sane response buffer so we can report
1344 		 * early errors back to the guest.
1345 		 */
1346 		ret = vhost_scsi_chk_size(vq, &vc);
1347 		if (ret)
1348 			goto err;
1349 
1350 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1351 		if (ret)
1352 			goto err;
1353 
1354 		ret = -EIO;	/* bad target on any error from here on */
1355 
1356 		/*
1357 		 * Determine data_direction by calculating the total outgoing
1358 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
1359 		 * response headers respectively.
1360 		 *
1361 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1362 		 * to the right place.
1363 		 *
1364 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1365 		 * of the virtio-scsi response header in either the same
1366 		 * or immediately following iovec.
1367 		 *
1368 		 * Any associated T10_PI bytes for the outgoing / incoming
1369 		 * payloads are included in calculation of exp_data_len here.
1370 		 */
1371 		prot_bytes = 0;
1372 
1373 		if (vc.out_size > vc.req_size) {
1374 			data_direction = DMA_TO_DEVICE;
1375 			exp_data_len = vc.out_size - vc.req_size;
1376 			data_iter = vc.out_iter;
1377 		} else if (vc.in_size > vc.rsp_size) {
1378 			data_direction = DMA_FROM_DEVICE;
1379 			exp_data_len = vc.in_size - vc.rsp_size;
1380 
1381 			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
1382 				      vc.rsp_size + exp_data_len);
1383 			iov_iter_advance(&in_iter, vc.rsp_size);
1384 			data_iter = in_iter;
1385 		} else {
1386 			data_direction = DMA_NONE;
1387 			exp_data_len = 0;
1388 		}
1389 		/*
1390 		 * If T10_PI header + payload is present, setup prot_iter values
1391 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1392 		 * host scatterlists via iov_iter_get_pages2().
1393 		 */
1394 		if (t10_pi) {
1395 			if (v_req_pi.pi_bytesout) {
1396 				if (data_direction != DMA_TO_DEVICE) {
1397 					vq_err(vq, "Received non zero pi_bytesout,"
1398 						" but wrong data_direction\n");
1399 					goto err;
1400 				}
1401 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1402 			} else if (v_req_pi.pi_bytesin) {
1403 				if (data_direction != DMA_FROM_DEVICE) {
1404 					vq_err(vq, "Received non zero pi_bytesin,"
1405 						" but wrong data_direction\n");
1406 					goto err;
1407 				}
1408 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1409 			}
1410 			/*
1411 			 * Set prot_iter to data_iter and truncate it to
1412 			 * prot_bytes, and advance data_iter past any
1413 			 * preceding prot_bytes that may be present.
1414 			 *
1415 			 * Also fix up the exp_data_len to reflect only the
1416 			 * actual data payload length.
1417 			 */
1418 			if (prot_bytes) {
1419 				exp_data_len -= prot_bytes;
1420 				prot_iter = data_iter;
1421 				iov_iter_truncate(&prot_iter, prot_bytes);
1422 				iov_iter_advance(&data_iter, prot_bytes);
1423 			}
1424 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1425 			task_attr = v_req_pi.task_attr;
1426 			cdb = &v_req_pi.cdb[0];
1427 			lun = vhost_buf_to_lun(v_req_pi.lun);
1428 		} else {
1429 			tag = vhost64_to_cpu(vq, v_req.tag);
1430 			task_attr = v_req.task_attr;
1431 			cdb = &v_req.cdb[0];
1432 			lun = vhost_buf_to_lun(v_req.lun);
1433 		}
1434 		/*
1435 		 * Check that the received CDB size does not exceed our
1436 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1437 		 * cmd descriptor for the new virtio-scsi tag.
1438 		 *
1439 		 * TODO what if cdb was too small for varlen cdb header?
1440 		 */
1441 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1442 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1443 				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
1444 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1445 			goto err;
1446 		}
1447 
1448 		nexus = tpg->tpg_nexus;
1449 		if (!nexus) {
1450 			vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
1451 			ret = -EIO;
1452 			goto err;
1453 		}
1454 
1455 		cmd = vhost_scsi_get_cmd(vq, tag);
1456 		if (IS_ERR(cmd)) {
1457 			ret = PTR_ERR(cmd);
1458 			vq_err(vq, "vhost_scsi_get_cmd failed %d\n", ret);
1459 			goto err;
1460 		}
1461 		cmd->tvc_vq = vq;
1462 
1463 		ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
1464 		if (ret) {
1465 			vq_err(vq, "Failed to alloc recv iovs\n");
1466 			vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1467 			goto err;
1468 		}
1469 
1470 		if (unlikely(vq_log && log_num)) {
1471 			ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
1472 			if (unlikely(ret)) {
1473 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1474 				goto err;
1475 			}
1476 		}
1477 
1478 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1479 			 cdb[0], lun);
1480 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1481 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1482 
1483 		if (data_direction != DMA_NONE) {
1484 			ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
1485 					       exp_data_len, &data_iter,
1486 					       data_direction);
1487 			if (unlikely(ret)) {
1488 				vq_err(vq, "Failed to map iov to sgl\n");
1489 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1490 				goto err;
1491 			}
1492 		}
1493 		/*
1494 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1495 		 * complete the virtio-scsi request in TCM callback context via
1496 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1497 		 */
1498 		cmd->tvc_vq_desc = vc.head;
1499 		vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
1500 					    data_direction,
1501 					    exp_data_len + prot_bytes);
1502 		ret = 0;
1503 err:
1504 		/*
1505 		 * ENXIO:  No more requests, or read error, wait for next kick
1506 		 * EINVAL: Invalid response buffer, drop the request
1507 		 * EIO:    Respond with bad target
1508 		 * EAGAIN: Pending request
1509 		 * ENOMEM: Could not allocate resources for request
1510 		 */
1511 		if (ret == -ENXIO)
1512 			break;
1513 		else if (ret == -EIO) {
1514 			vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
1515 			vhost_scsi_log_write(vq, vq_log, log_num);
1516 		} else if (ret == -ENOMEM) {
1517 			vhost_scsi_send_status(vs, vq, &vc,
1518 					       SAM_STAT_TASK_SET_FULL);
1519 			vhost_scsi_log_write(vq, vq_log, log_num);
1520 		}
1521 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1522 out:
1523 	mutex_unlock(&vq->mutex);
1524 }
1525 
1526 static void
1527 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1528 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1529 			 int tmf_resp_code)
1530 {
1531 	struct virtio_scsi_ctrl_tmf_resp rsp;
1532 	struct iov_iter iov_iter;
1533 	int ret;
1534 
1535 	pr_debug("%s\n", __func__);
1536 	memset(&rsp, 0, sizeof(rsp));
1537 	rsp.response = tmf_resp_code;
1538 
1539 	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
1540 
1541 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1542 	if (likely(ret == sizeof(rsp)))
1543 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1544 	else
1545 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1546 }
1547 
1548 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1549 {
1550 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1551 						  vwork);
1552 	int resp_code;
1553 
1554 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
1555 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1556 	else
1557 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1558 
1559 	mutex_lock(&tmf->svq->vq.mutex);
1560 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1561 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1562 	vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
1563 			     tmf->tmf_log_num);
1564 	mutex_unlock(&tmf->svq->vq.mutex);
1565 
1566 	vhost_scsi_release_tmf_res(tmf);
1567 }
1568 
1569 static void vhost_scsi_tmf_flush_work(struct work_struct *work)
1570 {
1571 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1572 						 flush_work);
1573 	struct vhost_virtqueue *vq = &tmf->svq->vq;
1574 	/*
1575 	 * Make sure we have sent responses for other commands before we
1576 	 * send our response.
1577 	 */
1578 	vhost_dev_flush(vq->dev);
1579 	if (!vhost_vq_work_queue(vq, &tmf->vwork))
1580 		vhost_scsi_release_tmf_res(tmf);
1581 }
1582 
1583 static void
1584 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1585 		      struct vhost_virtqueue *vq,
1586 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1587 		      struct vhost_scsi_ctx *vc,
1588 		      struct vhost_log *log, unsigned int log_num)
1589 {
1590 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1591 					struct vhost_scsi_virtqueue, vq);
1592 	struct vhost_scsi_tmf *tmf;
1593 
1594 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1595 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1596 		goto send_reject;
1597 
1598 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1599 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1600 		goto send_reject;
1601 	}
1602 
1603 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
1604 	if (!tmf)
1605 		goto send_reject;
1606 
1607 	INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
1608 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
1609 	tmf->vhost = vs;
1610 	tmf->svq = svq;
1611 	tmf->resp_iov = vq->iov[vc->out];
1612 	tmf->vq_desc = vc->head;
1613 	tmf->in_iovs = vc->in;
1614 	tmf->inflight = vhost_scsi_get_inflight(vq);
1615 
1616 	if (unlikely(log && log_num)) {
1617 		tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
1618 					     GFP_KERNEL);
1619 		if (tmf->tmf_log) {
1620 			memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
1621 			tmf->tmf_log_num = log_num;
1622 		} else {
1623 			pr_err("vhost_scsi tmf log allocation error\n");
1624 			vhost_scsi_release_tmf_res(tmf);
1625 			goto send_reject;
1626 		}
1627 	}
1628 
1629 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1630 			      vhost_buf_to_lun(vtmf->lun), NULL,
1631 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1632 			      TARGET_SCF_ACK_KREF) < 0) {
1633 		vhost_scsi_release_tmf_res(tmf);
1634 		goto send_reject;
1635 	}
1636 
1637 	return;
1638 
1639 send_reject:
1640 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1641 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1642 	vhost_scsi_log_write(vq, log, log_num);
1643 }
1644 
1645 static void
1646 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1647 			struct vhost_virtqueue *vq,
1648 			struct vhost_scsi_ctx *vc)
1649 {
1650 	struct virtio_scsi_ctrl_an_resp rsp;
1651 	struct iov_iter iov_iter;
1652 	int ret;
1653 
1654 	pr_debug("%s\n", __func__);
1655 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1656 	rsp.response = VIRTIO_SCSI_S_OK;
1657 
1658 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
1659 
1660 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1661 	if (likely(ret == sizeof(rsp)))
1662 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1663 	else
1664 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1665 }
1666 
1667 static void
1668 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1669 {
1670 	struct vhost_scsi_tpg *tpg;
1671 	union {
1672 		__virtio32 type;
1673 		struct virtio_scsi_ctrl_an_req an;
1674 		struct virtio_scsi_ctrl_tmf_req tmf;
1675 	} v_req;
1676 	struct vhost_scsi_ctx vc;
1677 	size_t typ_size;
1678 	int ret, c = 0;
1679 	struct vhost_log *vq_log;
1680 	unsigned int log_num;
1681 
1682 	mutex_lock(&vq->mutex);
1683 	/*
1684 	 * We can handle the vq only after the endpoint is setup by calling the
1685 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1686 	 */
1687 	if (!vhost_vq_get_backend(vq))
1688 		goto out;
1689 
1690 	memset(&vc, 0, sizeof(vc));
1691 
1692 	vhost_disable_notify(&vs->dev, vq);
1693 
1694 	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
1695 		vq->log : NULL;
1696 
1697 	do {
1698 		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
1699 		if (ret)
1700 			goto err;
1701 
1702 		/*
1703 		 * Get the request type first in order to setup
1704 		 * other parameters dependent on the type.
1705 		 */
1706 		vc.req = &v_req.type;
1707 		typ_size = sizeof(v_req.type);
1708 
1709 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1710 						  &vc.out_iter))) {
1711 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1712 			/*
1713 			 * The size of the response buffer depends on the
1714 			 * request type and must be validated against it.
1715 			 * Since the request type is not known, don't send
1716 			 * a response.
1717 			 */
1718 			continue;
1719 		}
1720 
1721 		switch (vhost32_to_cpu(vq, v_req.type)) {
1722 		case VIRTIO_SCSI_T_TMF:
1723 			vc.req = &v_req.tmf;
1724 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1725 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1726 			vc.lunp = &v_req.tmf.lun[0];
1727 			vc.target = &v_req.tmf.lun[1];
1728 			break;
1729 		case VIRTIO_SCSI_T_AN_QUERY:
1730 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1731 			vc.req = &v_req.an;
1732 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1733 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1734 			vc.lunp = &v_req.an.lun[0];
1735 			vc.target = NULL;
1736 			break;
1737 		default:
1738 			vq_err(vq, "Unknown control request %d", v_req.type);
1739 			continue;
1740 		}
1741 
1742 		/*
1743 		 * Validate the size of request and response buffers.
1744 		 * Check for a sane response buffer so we can report
1745 		 * early errors back to the guest.
1746 		 */
1747 		ret = vhost_scsi_chk_size(vq, &vc);
1748 		if (ret)
1749 			goto err;
1750 
1751 		/*
1752 		 * Get the rest of the request now that its size is known.
1753 		 */
1754 		vc.req += typ_size;
1755 		vc.req_size -= typ_size;
1756 
1757 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1758 		if (ret)
1759 			goto err;
1760 
1761 		if (v_req.type == VIRTIO_SCSI_T_TMF)
1762 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
1763 					      vq_log, log_num);
1764 		else {
1765 			vhost_scsi_send_an_resp(vs, vq, &vc);
1766 			vhost_scsi_log_write(vq, vq_log, log_num);
1767 		}
1768 err:
1769 		/*
1770 		 * ENXIO:  No more requests, or read error, wait for next kick
1771 		 * EINVAL: Invalid response buffer, drop the request
1772 		 * EIO:    Respond with bad target
1773 		 * EAGAIN: Pending request
1774 		 */
1775 		if (ret == -ENXIO)
1776 			break;
1777 		else if (ret == -EIO) {
1778 			vhost_scsi_send_bad_target(vs, vq, &vc,
1779 						   v_req.type == VIRTIO_SCSI_T_TMF ?
1780 						   TYPE_CTRL_TMF :
1781 						   TYPE_CTRL_AN);
1782 			vhost_scsi_log_write(vq, vq_log, log_num);
1783 		}
1784 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1785 out:
1786 	mutex_unlock(&vq->mutex);
1787 }
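
/*
 * Worked example of the two-phase parse above, assuming the UAPI layouts
 * in virtio_scsi.h: for a TMF request, sizeof(struct
 * virtio_scsi_ctrl_tmf_req) is 24 bytes. The loop first pulls the 4-byte
 * type out of the descriptor, then advances vc.req by typ_size and lets
 * vhost_scsi_get_req() read the remaining 24 - 4 = 20 bytes once the
 * expected size is known.
 */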
1788 
1789 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1790 {
1791 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1792 						poll.work);
1793 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1794 
1795 	pr_debug("%s: The handling func for control queue.\n", __func__);
1796 	vhost_scsi_ctl_handle_vq(vs, vq);
1797 }
1798 
1799 static void
1800 vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1801 		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
1802 		    u32 event, u32 reason)
1803 {
1804 	struct vhost_scsi_evt *evt;
1805 
1806 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1807 	if (!evt)
1808 		return;
1809 
1810 	if (tpg && lun) {
1811 		/* TODO: share lun setup code with virtio-scsi.ko */
1812 		/*
1813 		 * Note: evt->event is zeroed when we allocate it and
1814 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1815 		 */
1816 		evt->event.lun[0] = 0x01;
1817 		evt->event.lun[1] = tpg->tport_tpgt;
1818 		if (lun->unpacked_lun >= 256)
1819 			evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
1820 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1821 	}
1822 
1823 	llist_add(&evt->list, &vs->vs_event_list);
1824 	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
1825 		vhost_scsi_complete_events(vs, true);
1826 }
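
/*
 * Worked example of the event LUN encoding above: for unpacked_lun 0x123
 * on tpgt 1, the guest sees lun[] = { 0x01, 0x01, 0x41, 0x23, 0, 0, 0, 0 },
 * where 0x41 = (0x123 >> 8) | 0x40 (the flat-addressing flag) and 0x23 is
 * the low byte. For unpacked_lun < 256, lun[2] is left zero.
 */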
1827 
1828 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1829 {
1830 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1831 						poll.work);
1832 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1833 
1834 	mutex_lock(&vq->mutex);
1835 	if (!vhost_vq_get_backend(vq))
1836 		goto out;
1837 
1838 	if (vs->vs_events_missed)
1839 		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
1840 				    0);
1841 out:
1842 	mutex_unlock(&vq->mutex);
1843 }
1844 
1845 static void vhost_scsi_handle_kick(struct vhost_work *work)
1846 {
1847 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1848 						poll.work);
1849 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1850 
1851 	vhost_scsi_handle_vq(vs, vq);
1852 }
1853 
1854 /* Callers must hold dev mutex */
1855 static void vhost_scsi_flush(struct vhost_scsi *vs)
1856 {
1857 	int i;
1858 
1859 	/* Init new inflight and remember the old inflight */
1860 	vhost_scsi_init_inflight(vs, vs->old_inflight);
1861 
1862 	/*
1863 	 * The inflight->kref was initialized to 1. We decrement it here to
1864 	 * indicate the start of the flush operation so that it will reach 0
1865 	 * when all the reqs are finished.
1866 	 */
1867 	for (i = 0; i < vs->dev.nvqs; i++)
1868 		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1869 
1870 	/* Flush both the vhost poll and vhost work */
1871 	vhost_dev_flush(&vs->dev);
1872 
1873 	/* Wait for all reqs issued before the flush to be finished */
1874 	for (i = 0; i < vs->dev.nvqs; i++)
1875 		wait_for_completion(&vs->old_inflight[i]->comp);
1876 }
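
/*
 * Minimal sketch of the inflight accounting that makes the flush above
 * work (simplified; see vhost_scsi_{get,put}_inflight earlier in this
 * file):
 *
 *	kref_get(&inflight->kref);		// request start
 *	...
 *	kref_put(&inflight->kref, vhost_scsi_done_inflight);	// request end
 *	// vhost_scsi_done_inflight() fires complete(&inflight->comp),
 *	// which is what wait_for_completion() above blocks on.
 */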
1877 
1878 static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
1879 {
1880 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1881 					struct vhost_scsi_virtqueue, vq);
1882 	struct vhost_scsi_cmd *tv_cmd;
1883 	unsigned int i;
1884 
1885 	if (!svq->scsi_cmds)
1886 		return;
1887 
1888 	for (i = 0; i < svq->max_cmds; i++) {
1889 		tv_cmd = &svq->scsi_cmds[i];
1890 		kfree(tv_cmd->tvc_log);
1891 		tv_cmd->tvc_log = NULL;
1892 		tv_cmd->tvc_log_num = 0;
1893 	}
1894 }
1895 
1896 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1897 {
1898 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1899 					struct vhost_scsi_virtqueue, vq);
1900 	struct vhost_scsi_cmd *tv_cmd;
1901 	unsigned int i;
1902 
1903 	if (!svq->scsi_cmds)
1904 		return;
1905 
1906 	for (i = 0; i < svq->max_cmds; i++) {
1907 		tv_cmd = &svq->scsi_cmds[i];
1908 
1909 		kfree(tv_cmd->sgl);
1910 		kfree(tv_cmd->prot_sgl);
1911 	}
1912 
1913 	sbitmap_free(&svq->scsi_tags);
1914 	kfree(svq->upages);
1915 	vhost_scsi_destroy_vq_log(vq);
1916 	kfree(svq->scsi_cmds);
1917 	svq->scsi_cmds = NULL;
1918 }
1919 
1920 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1921 {
1922 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1923 					struct vhost_scsi_virtqueue, vq);
1924 	struct vhost_scsi *vs = svq->vs;
1925 	struct vhost_scsi_cmd *tv_cmd;
1926 	unsigned int i;
1927 
1928 	if (svq->scsi_cmds)
1929 		return 0;
1930 
1931 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1932 			      NUMA_NO_NODE, false, true))
1933 		return -ENOMEM;
1934 	svq->max_cmds = max_cmds;
1935 
1936 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1937 	if (!svq->scsi_cmds) {
1938 		sbitmap_free(&svq->scsi_tags);
1939 		return -ENOMEM;
1940 	}
1941 
1942 	svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
1943 			      GFP_KERNEL);
1944 	if (!svq->upages)
1945 		goto out;
1946 
1947 	for (i = 0; i < max_cmds; i++) {
1948 		tv_cmd = &svq->scsi_cmds[i];
1949 
1950 		if (vs->inline_sg_cnt) {
1951 			tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
1952 					      sizeof(struct scatterlist),
1953 					      GFP_KERNEL);
1954 			if (!tv_cmd->sgl) {
1955 				pr_err("Unable to allocate tv_cmd->sgl\n");
1956 				goto out;
1957 			}
1958 		}
1959 
1960 		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
1961 		    vs->inline_sg_cnt) {
1962 			tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
1963 						   sizeof(struct scatterlist),
1964 						   GFP_KERNEL);
1965 			if (!tv_cmd->prot_sgl) {
1966 				pr_err("Unable to allocate tv_cmd->prot_sgl\n");
1967 				goto out;
1968 			}
1969 		}
1970 	}
1971 	return 0;
1972 out:
1973 	vhost_scsi_destroy_vq_cmds(vq);
1974 	return -ENOMEM;
1975 }
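
/*
 * Rough sizing example for the preallocations above (illustrative
 * numbers only): with vq->num = 128 commands and the default
 * inline_sg_cnt of 2048, an IO vq preallocates 128 * 2048 scatterlist
 * entries, i.e. about 8 MiB at a typical 32 bytes per struct
 * scatterlist, and as much again for prot_sgl when
 * VIRTIO_SCSI_F_T10_PI is negotiated.
 */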
1976 
1977 /*
1978  * Called from vhost_scsi_ioctl() context to walk the list of available
1979  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1980  *
1981  *  The lock nesting rule is:
1982  *    vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1983  */
1984 static int
1985 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1986 			struct vhost_scsi_target *t)
1987 {
1988 	struct se_portal_group *se_tpg;
1989 	struct vhost_scsi_tport *tv_tport;
1990 	struct vhost_scsi_tpg *tpg;
1991 	struct vhost_scsi_tpg **vs_tpg;
1992 	struct vhost_virtqueue *vq;
1993 	int index, ret, i, len;
1994 	bool match = false;
1995 
1996 	mutex_lock(&vs->dev.mutex);
1997 
1998 	/* Verify that ring has been set up correctly. */
1999 	for (index = 0; index < vs->dev.nvqs; ++index) {
2001 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
2002 			ret = -EFAULT;
2003 			goto out;
2004 		}
2005 	}
2006 
2007 	if (vs->vs_tpg) {
2008 		pr_err("vhost-scsi endpoint already set for %s.\n",
2009 		       vs->vs_vhost_wwpn);
2010 		ret = -EEXIST;
2011 		goto out;
2012 	}
2013 
2014 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
2015 	vs_tpg = kzalloc(len, GFP_KERNEL);
2016 	if (!vs_tpg) {
2017 		ret = -ENOMEM;
2018 		goto out;
2019 	}
2020 
2021 	mutex_lock(&vhost_scsi_mutex);
2022 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
2023 		mutex_lock(&tpg->tv_tpg_mutex);
2024 		if (!tpg->tpg_nexus) {
2025 			mutex_unlock(&tpg->tv_tpg_mutex);
2026 			continue;
2027 		}
2028 		if (tpg->tv_tpg_vhost_count != 0) {
2029 			mutex_unlock(&tpg->tv_tpg_mutex);
2030 			continue;
2031 		}
2032 		tv_tport = tpg->tport;
2033 
2034 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
2035 			/*
2036 			 * In order to ensure individual vhost-scsi configfs
2037 			 * groups cannot be removed while in use by vhost ioctl,
2038 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
2039 			 * dependency now.
2040 			 */
2041 			se_tpg = &tpg->se_tpg;
2042 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
2043 			if (ret) {
2044 				pr_warn("target_depend_item() failed: %d\n", ret);
2045 				mutex_unlock(&tpg->tv_tpg_mutex);
2046 				mutex_unlock(&vhost_scsi_mutex);
2047 				goto undepend;
2048 			}
2049 			tpg->tv_tpg_vhost_count++;
2050 			tpg->vhost_scsi = vs;
2051 			vs_tpg[tpg->tport_tpgt] = tpg;
2052 			match = true;
2053 		}
2054 		mutex_unlock(&tpg->tv_tpg_mutex);
2055 	}
2056 	mutex_unlock(&vhost_scsi_mutex);
2057 
2058 	if (match) {
2059 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
2060 		       sizeof(vs->vs_vhost_wwpn));
2061 
2062 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
2063 			vq = &vs->vqs[i].vq;
2064 			if (!vhost_vq_is_setup(vq))
2065 				continue;
2066 
2067 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
2068 			if (ret)
2069 				goto destroy_vq_cmds;
2070 		}
2071 
2072 		for (i = 0; i < vs->dev.nvqs; i++) {
2073 			vq = &vs->vqs[i].vq;
2074 			mutex_lock(&vq->mutex);
2075 			vhost_vq_set_backend(vq, vs_tpg);
2076 			vhost_vq_init_access(vq);
2077 			mutex_unlock(&vq->mutex);
2078 		}
2079 		ret = 0;
2080 	} else {
2081 		ret = -ENODEV;
2082 		goto free_tpg;
2083 	}
2084 
2085 	/*
2086 	 * Act as synchronize_rcu to make sure requests after this point
2087 	 * see a fully set up device.
2088 	 */
2089 	vhost_scsi_flush(vs);
2090 	vs->vs_tpg = vs_tpg;
2091 	goto out;
2092 
2093 destroy_vq_cmds:
2094 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
2095 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
2096 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
2097 	}
2098 undepend:
2099 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2100 		tpg = vs_tpg[i];
2101 		if (tpg) {
2102 			mutex_lock(&tpg->tv_tpg_mutex);
2103 			tpg->vhost_scsi = NULL;
2104 			tpg->tv_tpg_vhost_count--;
2105 			mutex_unlock(&tpg->tv_tpg_mutex);
2106 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
2107 		}
2108 	}
2109 free_tpg:
2110 	kfree(vs_tpg);
2111 out:
2112 	mutex_unlock(&vs->dev.mutex);
2113 	return ret;
2114 }
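
/*
 * Illustration of the lock nesting rule documented above as
 * vhost_scsi_set_endpoint() exercises it:
 *
 *	mutex_lock(&vs->dev.mutex);
 *	  mutex_lock(&vhost_scsi_mutex);
 *	    mutex_lock(&tpg->tv_tpg_mutex);	// per tpg on the list
 *	    mutex_unlock(&tpg->tv_tpg_mutex);
 *	  mutex_unlock(&vhost_scsi_mutex);
 *	  mutex_lock(&vq->mutex);		// per vq, once a tport matched
 *	  mutex_unlock(&vq->mutex);
 *	mutex_unlock(&vs->dev.mutex);
 */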
2115 
2116 static int
2117 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
2118 			  struct vhost_scsi_target *t)
2119 {
2120 	struct se_portal_group *se_tpg;
2121 	struct vhost_scsi_tport *tv_tport;
2122 	struct vhost_scsi_tpg *tpg;
2123 	struct vhost_virtqueue *vq;
2124 	bool match = false;
2125 	int index, ret, i;
2126 	u8 target;
2127 
2128 	mutex_lock(&vs->dev.mutex);
2129 	/* Verify that ring has been set up correctly. */
2130 	for (index = 0; index < vs->dev.nvqs; ++index) {
2131 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
2132 			ret = -EFAULT;
2133 			goto err_dev;
2134 		}
2135 	}
2136 
2137 	if (!vs->vs_tpg) {
2138 		ret = 0;
2139 		goto err_dev;
2140 	}
2141 
2142 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2143 		target = i;
2144 		tpg = vs->vs_tpg[target];
2145 		if (!tpg)
2146 			continue;
2147 
2148 		tv_tport = tpg->tport;
2149 		if (!tv_tport) {
2150 			ret = -ENODEV;
2151 			goto err_dev;
2152 		}
2153 
2154 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
2155 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
2156 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
2157 				tv_tport->tport_name, tpg->tport_tpgt,
2158 				t->vhost_wwpn, t->vhost_tpgt);
2159 			ret = -EINVAL;
2160 			goto err_dev;
2161 		}
2162 		match = true;
2163 	}
2164 	if (!match)
2165 		goto free_vs_tpg;
2166 
2167 	/* Prevent new cmds from starting and accessing the tpgs/sessions */
2168 	for (i = 0; i < vs->dev.nvqs; i++) {
2169 		vq = &vs->vqs[i].vq;
2170 		mutex_lock(&vq->mutex);
2171 		vhost_vq_set_backend(vq, NULL);
2172 		mutex_unlock(&vq->mutex);
2173 	}
2174 	/* Make sure cmds are not running before tearing them down. */
2175 	vhost_scsi_flush(vs);
2176 
2177 	for (i = 0; i < vs->dev.nvqs; i++) {
2178 		vq = &vs->vqs[i].vq;
2179 		vhost_scsi_destroy_vq_cmds(vq);
2180 	}
2181 
2182 	/*
2183 	 * We can now release our hold on the tpg and sessions and userspace
2184 	 * can free them after this point.
2185 	 */
2186 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2187 		target = i;
2188 		tpg = vs->vs_tpg[target];
2189 		if (!tpg)
2190 			continue;
2191 
2192 		mutex_lock(&tpg->tv_tpg_mutex);
2193 
2194 		tpg->tv_tpg_vhost_count--;
2195 		tpg->vhost_scsi = NULL;
2196 		vs->vs_tpg[target] = NULL;
2197 
2198 		mutex_unlock(&tpg->tv_tpg_mutex);
2199 
2200 		se_tpg = &tpg->se_tpg;
2201 		target_undepend_item(&se_tpg->tpg_group.cg_item);
2202 	}
2203 
2204 free_vs_tpg:
2205 	/*
2206 	 * Act as synchronize_rcu to make sure access to
2207 	 * old vs->vs_tpg is finished.
2208 	 */
2209 	vhost_scsi_flush(vs);
2210 	kfree(vs->vs_tpg);
2211 	vs->vs_tpg = NULL;
2212 	memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
2213 	WARN_ON(vs->vs_events_nr);
2214 	mutex_unlock(&vs->dev.mutex);
2215 	return 0;
2216 
2217 err_dev:
2218 	mutex_unlock(&vs->dev.mutex);
2219 	return ret;
2220 }
2221 
2222 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
2223 {
2224 	struct vhost_virtqueue *vq;
2225 	bool is_log, was_log;
2226 	int i;
2227 
2228 	if (features & ~VHOST_SCSI_FEATURES)
2229 		return -EOPNOTSUPP;
2230 
2231 	mutex_lock(&vs->dev.mutex);
2232 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
2233 	    !vhost_log_access_ok(&vs->dev)) {
2234 		mutex_unlock(&vs->dev.mutex);
2235 		return -EFAULT;
2236 	}
2237 
2238 	if (!vs->dev.nvqs)
2239 		goto out;
2240 
2241 	is_log = features & (1 << VHOST_F_LOG_ALL);
2242 	/*
2243 	 * All VQs should have the same features.
2244 	 */
2245 	was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
2246 
2247 	for (i = 0; i < vs->dev.nvqs; i++) {
2248 		vq = &vs->vqs[i].vq;
2249 		mutex_lock(&vq->mutex);
2250 		vq->acked_features = features;
2251 		mutex_unlock(&vq->mutex);
2252 	}
2253 
2254 	/*
2255 	 * If VHOST_F_LOG_ALL is removed, free tvc_log after
2256 	 * vq->acked_features is committed.
2257 	 */
2258 	if (!is_log && was_log) {
2259 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
2260 			if (!vs->vqs[i].scsi_cmds)
2261 				continue;
2262 
2263 			vq = &vs->vqs[i].vq;
2264 			mutex_lock(&vq->mutex);
2265 			vhost_scsi_destroy_vq_log(vq);
2266 			mutex_unlock(&vq->mutex);
2267 		}
2268 	}
2269 
2270 out:
2271 	mutex_unlock(&vs->dev.mutex);
2272 	return 0;
2273 }
2274 
2275 static int vhost_scsi_open(struct inode *inode, struct file *f)
2276 {
2277 	struct vhost_scsi_virtqueue *svq;
2278 	struct vhost_scsi *vs;
2279 	struct vhost_virtqueue **vqs;
2280 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
2281 
2282 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
2283 	if (!vs)
2284 		goto err_vs;
2285 	vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;
2286 
2287 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
2288 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
2289 		       VHOST_SCSI_MAX_IO_VQ);
2290 		nvqs = VHOST_SCSI_MAX_IO_VQ;
2291 	} else if (nvqs == 0) {
2292 		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
2293 		nvqs = 1;
2294 	}
2295 	nvqs += VHOST_SCSI_VQ_IO;
2296 
2297 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
2298 					 GFP_KERNEL | __GFP_ZERO);
2299 	if (!vs->old_inflight)
2300 		goto err_inflight;
2301 
2302 	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
2303 				GFP_KERNEL | __GFP_ZERO);
2304 	if (!vs->vqs)
2305 		goto err_vqs;
2306 
2307 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
2308 	if (!vqs)
2309 		goto err_local_vqs;
2310 
2311 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
2312 
2313 	vs->vs_events_nr = 0;
2314 	vs->vs_events_missed = false;
2315 
2316 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
2317 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2318 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
2319 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
2320 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
2321 		svq = &vs->vqs[i];
2322 
2323 		vqs[i] = &svq->vq;
2324 		svq->vs = vs;
2325 		init_llist_head(&svq->completion_list);
2326 		vhost_work_init(&svq->completion_work,
2327 				vhost_scsi_complete_cmd_work);
2328 		svq->vq.handle_kick = vhost_scsi_handle_kick;
2329 	}
2330 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
2331 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
2332 
2333 	vhost_scsi_init_inflight(vs, NULL);
2334 
2335 	f->private_data = vs;
2336 	return 0;
2337 
2338 err_local_vqs:
2339 	kfree(vs->vqs);
2340 err_vqs:
2341 	kfree(vs->old_inflight);
2342 err_inflight:
2343 	kvfree(vs);
2344 err_vs:
2345 	return r;
2346 }
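
/*
 * Sizing note (assuming the module's default vhost_scsi_max_io_vqs of
 * 128, set earlier in this file): nvqs becomes 128 + VHOST_SCSI_VQ_IO
 * (2) = 130, i.e. one control vq, one event vq and 128 IO vqs are
 * allocated per open of /dev/vhost-scsi.
 */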
2347 
2348 static int vhost_scsi_release(struct inode *inode, struct file *f)
2349 {
2350 	struct vhost_scsi *vs = f->private_data;
2351 	struct vhost_scsi_target t;
2352 
2353 	mutex_lock(&vs->dev.mutex);
2354 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
2355 	mutex_unlock(&vs->dev.mutex);
2356 	vhost_scsi_clear_endpoint(vs, &t);
2357 	vhost_dev_stop(&vs->dev);
2358 	vhost_dev_cleanup(&vs->dev);
2359 	kfree(vs->dev.vqs);
2360 	kfree(vs->vqs);
2361 	kfree(vs->old_inflight);
2362 	kvfree(vs);
2363 	return 0;
2364 }
2365 
2366 static long
2367 vhost_scsi_ioctl(struct file *f,
2368 		 unsigned int ioctl,
2369 		 unsigned long arg)
2370 {
2371 	struct vhost_scsi *vs = f->private_data;
2372 	struct vhost_scsi_target backend;
2373 	void __user *argp = (void __user *)arg;
2374 	u64 __user *featurep = argp;
2375 	u32 __user *eventsp = argp;
2376 	u32 events_missed;
2377 	u64 features;
2378 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
2379 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2380 
2381 	switch (ioctl) {
2382 	case VHOST_SCSI_SET_ENDPOINT:
2383 		if (copy_from_user(&backend, argp, sizeof backend))
2384 			return -EFAULT;
2385 		if (backend.reserved != 0)
2386 			return -EOPNOTSUPP;
2387 
2388 		return vhost_scsi_set_endpoint(vs, &backend);
2389 	case VHOST_SCSI_CLEAR_ENDPOINT:
2390 		if (copy_from_user(&backend, argp, sizeof backend))
2391 			return -EFAULT;
2392 		if (backend.reserved != 0)
2393 			return -EOPNOTSUPP;
2394 
2395 		return vhost_scsi_clear_endpoint(vs, &backend);
2396 	case VHOST_SCSI_GET_ABI_VERSION:
2397 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
2398 			return -EFAULT;
2399 		return 0;
2400 	case VHOST_SCSI_SET_EVENTS_MISSED:
2401 		if (get_user(events_missed, eventsp))
2402 			return -EFAULT;
2403 		mutex_lock(&vq->mutex);
2404 		vs->vs_events_missed = events_missed;
2405 		mutex_unlock(&vq->mutex);
2406 		return 0;
2407 	case VHOST_SCSI_GET_EVENTS_MISSED:
2408 		mutex_lock(&vq->mutex);
2409 		events_missed = vs->vs_events_missed;
2410 		mutex_unlock(&vq->mutex);
2411 		if (put_user(events_missed, eventsp))
2412 			return -EFAULT;
2413 		return 0;
2414 	case VHOST_GET_FEATURES:
2415 		features = VHOST_SCSI_FEATURES;
2416 		if (copy_to_user(featurep, &features, sizeof features))
2417 			return -EFAULT;
2418 		return 0;
2419 	case VHOST_SET_FEATURES:
2420 		if (copy_from_user(&features, featurep, sizeof features))
2421 			return -EFAULT;
2422 		return vhost_scsi_set_features(vs, features);
2423 	case VHOST_NEW_WORKER:
2424 	case VHOST_FREE_WORKER:
2425 	case VHOST_ATTACH_VRING_WORKER:
2426 	case VHOST_GET_VRING_WORKER:
2427 		mutex_lock(&vs->dev.mutex);
2428 		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
2429 		mutex_unlock(&vs->dev.mutex);
2430 		return r;
2431 	default:
2432 		mutex_lock(&vs->dev.mutex);
2433 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
2434 		/* TODO: flush backend after dev ioctl. */
2435 		if (r == -ENOIOCTLCMD)
2436 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
2437 		mutex_unlock(&vs->dev.mutex);
2438 		return r;
2439 	}
2440 }
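
/*
 * Illustrative userspace sequence for the ioctls above (a hypothetical
 * sketch with error handling omitted; a real VMM also programs memory
 * tables and vrings through the generic vhost ioctls):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	__u64 features;
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	struct vhost_scsi_target backend = { 0 };
 *	snprintf(backend.vhost_wwpn, sizeof(backend.vhost_wwpn),
 *		 "naa.5001405caefe34b1");	// hypothetical WWPN
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 */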
2441 
2442 static const struct file_operations vhost_scsi_fops = {
2443 	.owner          = THIS_MODULE,
2444 	.release        = vhost_scsi_release,
2445 	.unlocked_ioctl = vhost_scsi_ioctl,
2446 	.compat_ioctl	= compat_ptr_ioctl,
2447 	.open           = vhost_scsi_open,
2448 	.llseek		= noop_llseek,
2449 };
2450 
2451 static struct miscdevice vhost_scsi_misc = {
2452 	MISC_DYNAMIC_MINOR,
2453 	"vhost-scsi",
2454 	&vhost_scsi_fops,
2455 };
2456 
2457 static int __init vhost_scsi_register(void)
2458 {
2459 	return misc_register(&vhost_scsi_misc);
2460 }
2461 
2462 static void vhost_scsi_deregister(void)
2463 {
2464 	misc_deregister(&vhost_scsi_misc);
2465 }
2466 
2467 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
2468 {
2469 	switch (tport->tport_proto_id) {
2470 	case SCSI_PROTOCOL_SAS:
2471 		return "SAS";
2472 	case SCSI_PROTOCOL_FCP:
2473 		return "FCP";
2474 	case SCSI_PROTOCOL_ISCSI:
2475 		return "iSCSI";
2476 	default:
2477 		break;
2478 	}
2479 
2480 	return "Unknown";
2481 }
2482 
2483 static void
2484 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
2485 		  struct se_lun *lun, bool plug)
2486 {
2488 	struct vhost_scsi *vs = tpg->vhost_scsi;
2489 	struct vhost_virtqueue *vq;
2490 	u32 reason;
2491 
2492 	if (!vs)
2493 		return;
2494 
2495 	if (plug)
2496 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
2497 	else
2498 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
2499 
2500 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2501 	mutex_lock(&vq->mutex);
2502 	/*
2503 	 * We can't queue events if the backend has been cleared, because
2504 	 * we could end up queueing an event after the flush.
2505 	 */
2506 	if (!vhost_vq_get_backend(vq))
2507 		goto unlock;
2508 
2509 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2510 		vhost_scsi_send_evt(vs, vq, tpg, lun,
2511 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2512 unlock:
2513 	mutex_unlock(&vq->mutex);
2514 }
2515 
2516 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2517 {
2518 	vhost_scsi_do_plug(tpg, lun, true);
2519 }
2520 
2521 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2522 {
2523 	vhost_scsi_do_plug(tpg, lun, false);
2524 }
2525 
2526 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2527 			       struct se_lun *lun)
2528 {
2529 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2530 				struct vhost_scsi_tpg, se_tpg);
2531 
2532 	mutex_lock(&tpg->tv_tpg_mutex);
2533 	tpg->tv_tpg_port_count++;
2534 	vhost_scsi_hotplug(tpg, lun);
2535 	mutex_unlock(&tpg->tv_tpg_mutex);
2536 
2537 	return 0;
2538 }
2539 
2540 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2541 				  struct se_lun *lun)
2542 {
2543 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2544 				struct vhost_scsi_tpg, se_tpg);
2545 
2546 	mutex_lock(&tpg->tv_tpg_mutex);
2547 	tpg->tv_tpg_port_count--;
2548 	vhost_scsi_hotunplug(tpg, lun);
2549 	mutex_unlock(&tpg->tv_tpg_mutex);
2550 }
2551 
2552 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2553 		struct config_item *item, const char *page, size_t count)
2554 {
2555 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2556 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2557 				struct vhost_scsi_tpg, se_tpg);
2558 	unsigned long val;
2559 	int ret = kstrtoul(page, 0, &val);
2560 
2561 	if (ret) {
2562 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2563 		return ret;
2564 	}
2565 	if (val != 0 && val != 1 && val != 3) {
2566 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2567 		return -EINVAL;
2568 	}
2569 	tpg->tv_fabric_prot_type = val;
2570 
2571 	return count;
2572 }
2573 
2574 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2575 		struct config_item *item, char *page)
2576 {
2577 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2578 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2579 				struct vhost_scsi_tpg, se_tpg);
2580 
2581 	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
2582 }
2583 
2584 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2585 
2586 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2587 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2588 	NULL,
2589 };
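
/*
 * Example configfs usage for the attribute above (illustrative path):
 *
 *	echo 1 > /sys/kernel/config/target/vhost/naa.<wwpn>/tpgt_1/attrib/fabric_prot_type
 *
 * Accepted values are 0, 1 and 3, matching the check in the store
 * handler.
 */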
2590 
2591 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2592 				const char *name)
2593 {
2594 	struct vhost_scsi_nexus *tv_nexus;
2595 
2596 	mutex_lock(&tpg->tv_tpg_mutex);
2597 	if (tpg->tpg_nexus) {
2598 		mutex_unlock(&tpg->tv_tpg_mutex);
2599 		pr_debug("tpg->tpg_nexus already exists\n");
2600 		return -EEXIST;
2601 	}
2602 
2603 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2604 	if (!tv_nexus) {
2605 		mutex_unlock(&tpg->tv_tpg_mutex);
2606 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2607 		return -ENOMEM;
2608 	}
2609 	/*
2610 	 * Since we are running in 'demo mode' this call will generate a
2611 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2612 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2613 	 */
2614 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2615 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2616 					(unsigned char *)name, tv_nexus, NULL);
2617 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2618 		mutex_unlock(&tpg->tv_tpg_mutex);
2619 		kfree(tv_nexus);
2620 		return -ENOMEM;
2621 	}
2622 	tpg->tpg_nexus = tv_nexus;
2623 
2624 	mutex_unlock(&tpg->tv_tpg_mutex);
2625 	return 0;
2626 }
2627 
2628 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2629 {
2630 	struct se_session *se_sess;
2631 	struct vhost_scsi_nexus *tv_nexus;
2632 
2633 	mutex_lock(&tpg->tv_tpg_mutex);
2634 	tv_nexus = tpg->tpg_nexus;
2635 	if (!tv_nexus) {
2636 		mutex_unlock(&tpg->tv_tpg_mutex);
2637 		return -ENODEV;
2638 	}
2639 
2640 	se_sess = tv_nexus->tvn_se_sess;
2641 	if (!se_sess) {
2642 		mutex_unlock(&tpg->tv_tpg_mutex);
2643 		return -ENODEV;
2644 	}
2645 
2646 	if (tpg->tv_tpg_port_count != 0) {
2647 		mutex_unlock(&tpg->tv_tpg_mutex);
2648 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2649 			" active TPG port count: %d\n",
2650 			tpg->tv_tpg_port_count);
2651 		return -EBUSY;
2652 	}
2653 
2654 	if (tpg->tv_tpg_vhost_count != 0) {
2655 		mutex_unlock(&tpg->tv_tpg_mutex);
2656 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2657 			" active TPG vhost count: %d\n",
2658 			tpg->tv_tpg_vhost_count);
2659 		return -EBUSY;
2660 	}
2661 
2662 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2663 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2664 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2665 
2666 	/*
2667 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2668 	 */
2669 	target_remove_session(se_sess);
2670 	tpg->tpg_nexus = NULL;
2671 	mutex_unlock(&tpg->tv_tpg_mutex);
2672 
2673 	kfree(tv_nexus);
2674 	return 0;
2675 }
2676 
2677 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2678 {
2679 	struct se_portal_group *se_tpg = to_tpg(item);
2680 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2681 				struct vhost_scsi_tpg, se_tpg);
2682 	struct vhost_scsi_nexus *tv_nexus;
2683 	ssize_t ret;
2684 
2685 	mutex_lock(&tpg->tv_tpg_mutex);
2686 	tv_nexus = tpg->tpg_nexus;
2687 	if (!tv_nexus) {
2688 		mutex_unlock(&tpg->tv_tpg_mutex);
2689 		return -ENODEV;
2690 	}
2691 	ret = sysfs_emit(page, "%s\n",
2692 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2693 	mutex_unlock(&tpg->tv_tpg_mutex);
2694 
2695 	return ret;
2696 }
2697 
2698 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2699 		const char *page, size_t count)
2700 {
2701 	struct se_portal_group *se_tpg = to_tpg(item);
2702 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2703 				struct vhost_scsi_tpg, se_tpg);
2704 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2705 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2706 	int ret;
2707 	/*
2708 	 * Shut down the active I_T nexus if 'NULL' is passed.
2709 	 */
2710 	if (!strncmp(page, "NULL", 4)) {
2711 		ret = vhost_scsi_drop_nexus(tpg);
2712 		return (!ret) ? count : ret;
2713 	}
2714 	/*
2715 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2716 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2717 	 * vhost_scsi_make_nexus().
2718 	 */
2719 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2720 		pr_err("Emulated NAA Sas Address: %s, exceeds"
2721 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2722 		return -EINVAL;
2723 	}
2724 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2725 
2726 	ptr = strstr(i_port, "naa.");
2727 	if (ptr) {
2728 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2729 			pr_err("Passed SAS Initiator Port %s does not"
2730 				" match target port protoid: %s\n", i_port,
2731 				vhost_scsi_dump_proto_id(tport_wwn));
2732 			return -EINVAL;
2733 		}
2734 		port_ptr = &i_port[0];
2735 		goto check_newline;
2736 	}
2737 	ptr = strstr(i_port, "fc.");
2738 	if (ptr) {
2739 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2740 			pr_err("Passed FCP Initiator Port %s does not"
2741 				" match target port protoid: %s\n", i_port,
2742 				vhost_scsi_dump_proto_id(tport_wwn));
2743 			return -EINVAL;
2744 		}
2745 		port_ptr = &i_port[3]; /* Skip over "fc." */
2746 		goto check_newline;
2747 	}
2748 	ptr = strstr(i_port, "iqn.");
2749 	if (ptr) {
2750 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2751 			pr_err("Passed iSCSI Initiator Port %s does not"
2752 				" match target port protoid: %s\n", i_port,
2753 				vhost_scsi_dump_proto_id(tport_wwn));
2754 			return -EINVAL;
2755 		}
2756 		port_ptr = &i_port[0];
2757 		goto check_newline;
2758 	}
2759 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2760 			" %s\n", i_port);
2761 	return -EINVAL;
2762 	/*
2763 	 * Clear any trailing newline for the NAA WWN
2764 	 */
2765 check_newline:
2766 	if (i_port[strlen(i_port)-1] == '\n')
2767 		i_port[strlen(i_port)-1] = '\0';
2768 
2769 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2770 	if (ret < 0)
2771 		return ret;
2772 
2773 	return count;
2774 }
2775 
2776 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2777 
2778 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2779 	&vhost_scsi_tpg_attr_nexus,
2780 	NULL,
2781 };
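
/*
 * Example configfs flow for the nexus attribute above (illustrative
 * WWNs; targetcli normally drives this):
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.5001405caefe34b1/tpgt_1
 *	echo naa.60014055cc1cb6f6 > \
 *		/sys/kernel/config/target/vhost/naa.5001405caefe34b1/tpgt_1/nexus
 *	echo NULL > /sys/kernel/config/target/vhost/naa.5001405caefe34b1/tpgt_1/nexus
 *
 * The final write shuts the active I_T nexus down again.
 */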
2782 
2783 static struct se_portal_group *
2784 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2785 {
2786 	struct vhost_scsi_tport *tport = container_of(wwn,
2787 			struct vhost_scsi_tport, tport_wwn);
2788 
2789 	struct vhost_scsi_tpg *tpg;
2790 	u16 tpgt;
2791 	int ret;
2792 
2793 	if (strstr(name, "tpgt_") != name)
2794 		return ERR_PTR(-EINVAL);
2795 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2796 		return ERR_PTR(-EINVAL);
2797 
2798 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2799 	if (!tpg) {
2800 		pr_err("Unable to allocate struct vhost_scsi_tpg");
2801 		return ERR_PTR(-ENOMEM);
2802 	}
2803 	mutex_init(&tpg->tv_tpg_mutex);
2804 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2805 	tpg->tport = tport;
2806 	tpg->tport_tpgt = tpgt;
2807 
2808 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2809 	if (ret < 0) {
2810 		kfree(tpg);
2811 		return NULL;
2812 	}
2813 	mutex_lock(&vhost_scsi_mutex);
2814 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2815 	mutex_unlock(&vhost_scsi_mutex);
2816 
2817 	return &tpg->se_tpg;
2818 }
2819 
2820 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2821 {
2822 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2823 				struct vhost_scsi_tpg, se_tpg);
2824 
2825 	mutex_lock(&vhost_scsi_mutex);
2826 	list_del(&tpg->tv_tpg_list);
2827 	mutex_unlock(&vhost_scsi_mutex);
2828 	/*
2829 	 * Release the virtual I_T Nexus for this vhost TPG
2830 	 */
2831 	vhost_scsi_drop_nexus(tpg);
2832 	/*
2833 	 * Deregister the se_tpg from TCM.
2834 	 */
2835 	core_tpg_deregister(se_tpg);
2836 	kfree(tpg);
2837 }
2838 
2839 static struct se_wwn *
2840 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2841 		     struct config_group *group,
2842 		     const char *name)
2843 {
2844 	struct vhost_scsi_tport *tport;
2845 	char *ptr;
2846 	u64 wwpn = 0;
2847 	int off = 0;
2848 
2849 	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2850 		return ERR_PTR(-EINVAL); */
2851 
2852 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2853 	if (!tport) {
2854 		pr_err("Unable to allocate struct vhost_scsi_tport");
2855 		return ERR_PTR(-ENOMEM);
2856 	}
2857 	tport->tport_wwpn = wwpn;
2858 	/*
2859 	 * Determine the emulated Protocol Identifier and Target Port Name
2860 	 * based on the incoming configfs directory name.
2861 	 */
2862 	ptr = strstr(name, "naa.");
2863 	if (ptr) {
2864 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2865 		goto check_len;
2866 	}
2867 	ptr = strstr(name, "fc.");
2868 	if (ptr) {
2869 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2870 		off = 3; /* Skip over "fc." */
2871 		goto check_len;
2872 	}
2873 	ptr = strstr(name, "iqn.");
2874 	if (ptr) {
2875 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2876 		goto check_len;
2877 	}
2878 
2879 	pr_err("Unable to locate prefix for emulated Target Port:"
2880 			" %s\n", name);
2881 	kfree(tport);
2882 	return ERR_PTR(-EINVAL);
2883 
2884 check_len:
2885 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2886 		pr_err("Emulated %s Address: %s, exceeds"
2887 			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
2888 			VHOST_SCSI_NAMELEN);
2889 		kfree(tport);
2890 		return ERR_PTR(-EINVAL);
2891 	}
2892 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2893 
2894 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2895 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2896 
2897 	return &tport->tport_wwn;
2898 }
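
/*
 * Examples of configfs directory names accepted above (hypothetical
 * WWNs):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.5001405caefe34b1	  -> SAS
 *	mkdir /sys/kernel/config/target/vhost/iqn.2016-01.org.example:t1 -> iSCSI
 *	mkdir /sys/kernel/config/target/vhost/fc.20000000c9f0f2a1	  -> FCP ("fc." is stripped)
 */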
2899 
2900 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2901 {
2902 	struct vhost_scsi_tport *tport = container_of(wwn,
2903 				struct vhost_scsi_tport, tport_wwn);
2904 
2905 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2906 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2907 		tport->tport_name);
2908 
2909 	kfree(tport);
2910 }
2911 
2912 static ssize_t
2913 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2914 {
2915 	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
2916 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2917 		utsname()->machine);
2918 }
2919 
2920 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2921 
2922 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2923 	&vhost_scsi_wwn_attr_version,
2924 	NULL,
2925 };
2926 
2927 static const struct target_core_fabric_ops vhost_scsi_ops = {
2928 	.module				= THIS_MODULE,
2929 	.fabric_name			= "vhost",
2930 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2931 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2932 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2933 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2934 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2935 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2936 	.release_cmd			= vhost_scsi_release_cmd,
2937 	.check_stop_free		= vhost_scsi_check_stop_free,
2938 	.sess_get_initiator_sid		= NULL,
2939 	.write_pending			= vhost_scsi_write_pending,
2940 	.queue_data_in			= vhost_scsi_queue_data_in,
2941 	.queue_status			= vhost_scsi_queue_status,
2942 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2943 	.aborted_task			= vhost_scsi_aborted_task,
2944 	/*
2945 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2946 	 */
2947 	.fabric_make_wwn		= vhost_scsi_make_tport,
2948 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2949 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2950 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2951 	.fabric_post_link		= vhost_scsi_port_link,
2952 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2953 
2954 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2955 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2956 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2957 
2958 	.default_submit_type		= TARGET_QUEUE_SUBMIT,
2959 	.direct_submit_supp		= 1,
2960 };
2961 
2962 static int __init vhost_scsi_init(void)
2963 {
2964 	int ret = -ENOMEM;
2965 
2966 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2967 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2968 		utsname()->machine);
2969 
2970 	ret = vhost_scsi_register();
2971 	if (ret < 0)
2972 		goto out;
2973 
2974 	ret = target_register_template(&vhost_scsi_ops);
2975 	if (ret < 0)
2976 		goto out_vhost_scsi_deregister;
2977 
2978 	return 0;
2979 
2980 out_vhost_scsi_deregister:
2981 	vhost_scsi_deregister();
2982 out:
2983 	return ret;
2984 }
2985 
2986 static void vhost_scsi_exit(void)
2987 {
2988 	target_unregister_template(&vhost_scsi_ops);
2989 	vhost_scsi_deregister();
2990 }
2991 
2992 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2993 MODULE_ALIAS("tcm_vhost");
2994 MODULE_LICENSE("GPL");
2995 module_init(vhost_scsi_init);
2996 module_exit(vhost_scsi_exit);
2997