xref: /linux/drivers/vhost/scsi.c (revision 0da908c291070d89482f6211dbe81d4d43c3f7cb)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*******************************************************************************
3  * Vhost kernel TCM fabric driver for virtio SCSI initiators
4  *
5  * (C) Copyright 2010-2013 Datera, Inc.
6  * (C) Copyright 2010-2012 IBM Corp.
7  *
8  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
9  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10  ****************************************************************************/
11 
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <generated/utsrelease.h>
15 #include <linux/utsname.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/kthread.h>
19 #include <linux/types.h>
20 #include <linux/string.h>
21 #include <linux/configfs.h>
22 #include <linux/ctype.h>
23 #include <linux/compat.h>
24 #include <linux/eventfd.h>
25 #include <linux/fs.h>
26 #include <linux/vmalloc.h>
27 #include <linux/miscdevice.h>
28 #include <asm/unaligned.h>
29 #include <scsi/scsi_common.h>
30 #include <scsi/scsi_proto.h>
31 #include <target/target_core_base.h>
32 #include <target/target_core_fabric.h>
33 #include <linux/vhost.h>
34 #include <linux/virtio_scsi.h>
35 #include <linux/llist.h>
36 #include <linux/bitmap.h>
37 
38 #include "vhost.h"
39 
40 #define VHOST_SCSI_VERSION  "v0.1"
41 #define VHOST_SCSI_NAMELEN 256
42 #define VHOST_SCSI_MAX_CDB_SIZE 32
43 #define VHOST_SCSI_PREALLOC_SGLS 2048
44 #define VHOST_SCSI_PREALLOC_UPAGES 2048
45 #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
46 
47 /* Max number of requests before requeueing the job.
48  * Using this limit prevents one virtqueue from starving the others
49  * with requests.
50  */
51 #define VHOST_SCSI_WEIGHT 256
52 
53 struct vhost_scsi_inflight {
54 	/* Wait for the flush operation to finish */
55 	struct completion comp;
56 	/* Refcount for the inflight reqs */
57 	struct kref kref;
58 };
59 
60 struct vhost_scsi_cmd {
61 	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
62 	int tvc_vq_desc;
63 	/* virtio-scsi initiator task attribute */
64 	int tvc_task_attr;
65 	/* virtio-scsi response incoming iovecs */
66 	int tvc_in_iovs;
67 	/* virtio-scsi initiator data direction */
68 	enum dma_data_direction tvc_data_direction;
69 	/* Expected data transfer length from virtio-scsi header */
70 	u32 tvc_exp_data_len;
71 	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
72 	u64 tvc_tag;
73 	/* The number of scatterlists associated with this cmd */
74 	u32 tvc_sgl_count;
75 	u32 tvc_prot_sgl_count;
76 	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
77 	u32 tvc_lun;
78 	/* Pointer to the SGL formatted memory from virtio-scsi */
79 	struct scatterlist *tvc_sgl;
80 	struct scatterlist *tvc_prot_sgl;
81 	struct page **tvc_upages;
82 	/* Response header iovec, copied from vq->iov[out] at submission */
83 	struct iovec tvc_resp_iov;
84 	/* Pointer to vhost_scsi for our device */
85 	struct vhost_scsi *tvc_vhost;
86 	/* Pointer to vhost_virtqueue for the cmd */
87 	struct vhost_virtqueue *tvc_vq;
88 	/* Pointer to vhost nexus memory */
89 	struct vhost_scsi_nexus *tvc_nexus;
90 	/* The TCM I/O descriptor that is accessed via container_of() */
91 	struct se_cmd tvc_se_cmd;
92 	/* Copy of the incoming SCSI command descriptor block (CDB) */
93 	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
94 	/* Sense buffer that will be mapped into outgoing status */
95 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
96 	/* Completed commands list, serviced from vhost worker thread */
97 	struct llist_node tvc_completion_list;
98 	/* Used to track inflight cmd */
99 	struct vhost_scsi_inflight *inflight;
100 };
101 
102 struct vhost_scsi_nexus {
103 	/* Pointer to TCM session for I_T Nexus */
104 	struct se_session *tvn_se_sess;
105 };
106 
107 struct vhost_scsi_tpg {
108 	/* Vhost port target portal group tag for TCM */
109 	u16 tport_tpgt;
110 	/* Used to track the number of TPG Port/LUN links wrt explicit I_T Nexus shutdown */
111 	int tv_tpg_port_count;
112 	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
113 	int tv_tpg_vhost_count;
114 	/* Used for enabling T10-PI with legacy devices */
115 	int tv_fabric_prot_type;
116 	/* list for vhost_scsi_list */
117 	struct list_head tv_tpg_list;
118 	/* Used to protect access to tpg_nexus */
119 	struct mutex tv_tpg_mutex;
120 	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
121 	struct vhost_scsi_nexus *tpg_nexus;
122 	/* Pointer back to vhost_scsi_tport */
123 	struct vhost_scsi_tport *tport;
124 	/* Returned by vhost_scsi_make_tpg() */
125 	struct se_portal_group se_tpg;
126 	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
127 	struct vhost_scsi *vhost_scsi;
128 	struct list_head tmf_queue;
129 };
130 
131 struct vhost_scsi_tport {
132 	/* SCSI protocol the tport is providing */
133 	u8 tport_proto_id;
134 	/* Binary World Wide unique Port Name for Vhost Target port */
135 	u64 tport_wwpn;
136 	/* ASCII formatted WWPN for Vhost Target port */
137 	char tport_name[VHOST_SCSI_NAMELEN];
138 	/* Returned by vhost_scsi_make_tport() */
139 	struct se_wwn tport_wwn;
140 };
141 
142 struct vhost_scsi_evt {
143 	/* event to be sent to guest */
144 	struct virtio_scsi_event event;
145 	/* event list, serviced from vhost worker thread */
146 	struct llist_node list;
147 };
148 
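/* Fixed virtqueue layout from the virtio-scsi spec: 0 = control, 1 = event, 2..n = request queues. */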
149 enum {
150 	VHOST_SCSI_VQ_CTL = 0,
151 	VHOST_SCSI_VQ_EVT = 1,
152 	VHOST_SCSI_VQ_IO = 2,
153 };
154 
155 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
156 enum {
157 	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
158 					       (1ULL << VIRTIO_SCSI_F_T10_PI)
159 };
160 
161 #define VHOST_SCSI_MAX_TARGET	256
162 #define VHOST_SCSI_MAX_IO_VQ	1024
163 #define VHOST_SCSI_MAX_EVENT	128
164 
165 static unsigned vhost_scsi_max_io_vqs = 128;
166 module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
167 MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
168 
169 struct vhost_scsi_virtqueue {
170 	struct vhost_virtqueue vq;
171 	/*
172 	 * Reference counting for inflight reqs, used for the flush operation.
173 	 * At any time, one counter tracks newly submitted commands, while we
174 	 * wait for the other one to drop to 0.
175 	 */
176 	struct vhost_scsi_inflight inflights[2];
177 	/*
178 	 * Indicate current inflight in use, protected by vq->mutex.
179 	 * Writers must also take dev mutex and flush under it.
180 	 */
181 	int inflight_idx;
182 	struct vhost_scsi_cmd *scsi_cmds;
183 	struct sbitmap scsi_tags;
184 	int max_cmds;
185 };
186 
187 struct vhost_scsi {
188 	/* Protected by vhost_scsi->dev.mutex */
189 	struct vhost_scsi_tpg **vs_tpg;
190 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
191 
192 	struct vhost_dev dev;
193 	struct vhost_scsi_virtqueue *vqs;
194 	unsigned long *compl_bitmap;
195 	struct vhost_scsi_inflight **old_inflight;
196 
197 	struct vhost_work vs_completion_work; /* cmd completion work item */
198 	struct llist_head vs_completion_list; /* cmd completion queue */
199 
200 	struct vhost_work vs_event_work; /* evt injection work item */
201 	struct llist_head vs_event_list; /* evt injection queue */
202 
203 	bool vs_events_missed; /* any missed events, protected by vq->mutex */
204 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
205 };
206 
207 struct vhost_scsi_tmf {
208 	struct vhost_work vwork;
209 	struct vhost_scsi_tpg *tpg;
210 	struct vhost_scsi *vhost;
211 	struct vhost_scsi_virtqueue *svq;
212 	struct list_head queue_entry;
213 
214 	struct se_cmd se_cmd;
215 	u8 scsi_resp;
216 	struct vhost_scsi_inflight *inflight;
217 	struct iovec resp_iov;
218 	int in_iovs;
219 	int vq_desc;
220 };
221 
222 /*
223  * Context for processing request and control queue operations.
224  */
225 struct vhost_scsi_ctx {
226 	int head;
227 	unsigned int out, in;
228 	size_t req_size, rsp_size;
229 	size_t out_size, in_size;
230 	u8 *target, *lunp;
231 	void *req;
232 	struct iov_iter out_iter;
233 };
234 
235 /* Global mutex to protect the vhost_scsi TPG list for vhost IOCTL access */
236 static DEFINE_MUTEX(vhost_scsi_mutex);
237 static LIST_HEAD(vhost_scsi_list);
238 
239 static void vhost_scsi_done_inflight(struct kref *kref)
240 {
241 	struct vhost_scsi_inflight *inflight;
242 
243 	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
244 	complete(&inflight->comp);
245 }
246 
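/*
 * Flip each virtqueue over to a fresh inflight counter. When old_inflight
 * is non-NULL, the previous counters are returned so a flush can wait for
 * the requests still accounted against them to drain.
 */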
247 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
248 				    struct vhost_scsi_inflight *old_inflight[])
249 {
250 	struct vhost_scsi_inflight *new_inflight;
251 	struct vhost_virtqueue *vq;
252 	int idx, i;
253 
254 	for (i = 0; i < vs->dev.nvqs;  i++) {
255 		vq = &vs->vqs[i].vq;
256 
257 		mutex_lock(&vq->mutex);
258 
259 		/* store the old inflight */
260 		idx = vs->vqs[i].inflight_idx;
261 		if (old_inflight)
262 			old_inflight[i] = &vs->vqs[i].inflights[idx];
263 
264 		/* set up the new inflight */
265 		vs->vqs[i].inflight_idx = idx ^ 1;
266 		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
267 		kref_init(&new_inflight->kref);
268 		init_completion(&new_inflight->comp);
269 
270 		mutex_unlock(&vq->mutex);
271 	}
272 }
273 
274 static struct vhost_scsi_inflight *
275 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
276 {
277 	struct vhost_scsi_inflight *inflight;
278 	struct vhost_scsi_virtqueue *svq;
279 
280 	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
281 	inflight = &svq->inflights[svq->inflight_idx];
282 	kref_get(&inflight->kref);
283 
284 	return inflight;
285 }
286 
287 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
288 {
289 	kref_put(&inflight->kref, vhost_scsi_done_inflight);
290 }
291 
292 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
293 {
294 	return 1;
295 }
296 
297 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
298 {
299 	return 0;
300 }
301 
302 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
303 {
304 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
305 				struct vhost_scsi_tpg, se_tpg);
306 	struct vhost_scsi_tport *tport = tpg->tport;
307 
308 	return &tport->tport_name[0];
309 }
310 
311 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
312 {
313 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
314 				struct vhost_scsi_tpg, se_tpg);
315 	return tpg->tport_tpgt;
316 }
317 
318 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
319 {
320 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
321 				struct vhost_scsi_tpg, se_tpg);
322 
323 	return tpg->tv_fabric_prot_type;
324 }
325 
326 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
327 {
328 	return 1;
329 }
330 
331 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
332 {
333 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
334 				struct vhost_scsi_cmd, tvc_se_cmd);
335 	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
336 				struct vhost_scsi_virtqueue, vq);
337 	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
338 	int i;
339 
340 	if (tv_cmd->tvc_sgl_count) {
341 		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
342 			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
343 	}
344 	if (tv_cmd->tvc_prot_sgl_count) {
345 		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
346 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
347 	}
348 
349 	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
350 	vhost_scsi_put_inflight(inflight);
351 }
352 
353 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
354 {
355 	struct vhost_scsi_tpg *tpg = tmf->tpg;
356 	struct vhost_scsi_inflight *inflight = tmf->inflight;
357 
358 	mutex_lock(&tpg->tv_tpg_mutex);
359 	list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
360 	mutex_unlock(&tpg->tv_tpg_mutex);
361 	vhost_scsi_put_inflight(inflight);
362 }
363 
364 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
365 {
366 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
367 		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
368 					struct vhost_scsi_tmf, se_cmd);
369 
370 		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
371 	} else {
372 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
373 					struct vhost_scsi_cmd, tvc_se_cmd);
374 		struct vhost_scsi *vs = cmd->tvc_vhost;
375 
376 		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
377 		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
378 	}
379 }
380 
381 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
382 {
383 	return 0;
384 }
385 
386 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
387 {
388 	/* Go ahead and process the write immediately */
389 	target_execute_cmd(se_cmd);
390 	return 0;
391 }
392 
393 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
394 {
395 	return;
396 }
397 
398 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
399 {
400 	return 0;
401 }
402 
403 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
404 {
405 	transport_generic_free_cmd(se_cmd, 0);
406 	return 0;
407 }
408 
409 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
410 {
411 	transport_generic_free_cmd(se_cmd, 0);
412 	return 0;
413 }
414 
415 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
416 {
417 	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
418 						  se_cmd);
419 
420 	tmf->scsi_resp = se_cmd->se_tmr_req->response;
421 	transport_generic_free_cmd(&tmf->se_cmd, 0);
422 }
423 
424 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
425 {
426 	return;
427 }
428 
429 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
430 {
431 	vs->vs_events_nr--;
432 	kfree(evt);
433 }
434 
435 static struct vhost_scsi_evt *
436 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
437 		       u32 event, u32 reason)
438 {
439 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
440 	struct vhost_scsi_evt *evt;
441 
442 	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
443 		vs->vs_events_missed = true;
444 		return NULL;
445 	}
446 
447 	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
448 	if (!evt) {
449 		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
450 		vs->vs_events_missed = true;
451 		return NULL;
452 	}
453 
454 	evt->event.event = cpu_to_vhost32(vq, event);
455 	evt->event.reason = cpu_to_vhost32(vq, reason);
456 	vs->vs_events_nr++;
457 
458 	return evt;
459 }
460 
461 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
462 {
463 	return target_put_sess_cmd(se_cmd);
464 }
465 
466 static void
467 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
468 {
469 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
470 	struct virtio_scsi_event *event = &evt->event;
471 	struct virtio_scsi_event __user *eventp;
472 	unsigned out, in;
473 	int head, ret;
474 
475 	if (!vhost_vq_get_backend(vq)) {
476 		vs->vs_events_missed = true;
477 		return;
478 	}
479 
480 again:
481 	vhost_disable_notify(&vs->dev, vq);
482 	head = vhost_get_vq_desc(vq, vq->iov,
483 			ARRAY_SIZE(vq->iov), &out, &in,
484 			NULL, NULL);
485 	if (head < 0) {
486 		vs->vs_events_missed = true;
487 		return;
488 	}
489 	if (head == vq->num) {
490 		if (vhost_enable_notify(&vs->dev, vq))
491 			goto again;
492 		vs->vs_events_missed = true;
493 		return;
494 	}
495 
496 	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
497 		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
498 				vq->iov[out].iov_len);
499 		vs->vs_events_missed = true;
500 		return;
501 	}
502 
503 	if (vs->vs_events_missed) {
504 		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
505 		vs->vs_events_missed = false;
506 	}
507 
508 	eventp = vq->iov[out].iov_base;
509 	ret = __copy_to_user(eventp, event, sizeof(*event));
510 	if (!ret)
511 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
512 	else
513 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
514 }
515 
516 static void vhost_scsi_evt_work(struct vhost_work *work)
517 {
518 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
519 					vs_event_work);
520 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
521 	struct vhost_scsi_evt *evt, *t;
522 	struct llist_node *llnode;
523 
524 	mutex_lock(&vq->mutex);
525 	llnode = llist_del_all(&vs->vs_event_list);
526 	llist_for_each_entry_safe(evt, t, llnode, list) {
527 		vhost_scsi_do_evt_work(vs, evt);
528 		vhost_scsi_free_evt(vs, evt);
529 	}
530 	mutex_unlock(&vq->mutex);
531 }
532 
533 /* Fill in status and signal that we are done processing this command
534  *
535  * This is scheduled in the vhost work queue so we are called with the owner
536  * process mm and can access the vring.
537  */
538 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
539 {
540 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
541 					vs_completion_work);
542 	struct virtio_scsi_cmd_resp v_rsp;
543 	struct vhost_scsi_cmd *cmd, *t;
544 	struct llist_node *llnode;
545 	struct se_cmd *se_cmd;
546 	struct iov_iter iov_iter;
547 	int ret, vq;
548 
549 	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
550 	llnode = llist_del_all(&vs->vs_completion_list);
551 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
552 		se_cmd = &cmd->tvc_se_cmd;
553 
554 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
555 			cmd, se_cmd->residual_count, se_cmd->scsi_status);
556 
557 		memset(&v_rsp, 0, sizeof(v_rsp));
558 		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
559 		/* TODO is status_qualifier field needed? */
560 		v_rsp.status = se_cmd->scsi_status;
561 		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
562 						 se_cmd->scsi_sense_length);
563 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
564 		       se_cmd->scsi_sense_length);
565 
566 		iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov,
567 			      cmd->tvc_in_iovs, sizeof(v_rsp));
568 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
569 		if (likely(ret == sizeof(v_rsp))) {
570 			struct vhost_scsi_virtqueue *q;
571 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
572 			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
573 			vq = q - vs->vqs;
574 			__set_bit(vq, vs->compl_bitmap);
575 		} else
576 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
577 
578 		vhost_scsi_release_cmd_res(se_cmd);
579 	}
580 
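	/* Kick every vq that had at least one response queued above. */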
581 	vq = -1;
582 	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
583 		< vs->dev.nvqs)
584 		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
585 }
586 
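/*
 * Grab a free command descriptor for this virtqueue. The tag allocated
 * from the per-vq sbitmap indexes directly into svq->scsi_cmds, and the
 * descriptor's pre-allocated SGL and page arrays survive the memset below.
 */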
587 static struct vhost_scsi_cmd *
588 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
589 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
590 		   u32 exp_data_len, int data_direction)
591 {
592 	struct vhost_scsi_virtqueue *svq = container_of(vq,
593 					struct vhost_scsi_virtqueue, vq);
594 	struct vhost_scsi_cmd *cmd;
595 	struct vhost_scsi_nexus *tv_nexus;
596 	struct scatterlist *sg, *prot_sg;
597 	struct page **pages;
598 	int tag;
599 
600 	tv_nexus = tpg->tpg_nexus;
601 	if (!tv_nexus) {
602 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
603 		return ERR_PTR(-EIO);
604 	}
605 
606 	tag = sbitmap_get(&svq->scsi_tags);
607 	if (tag < 0) {
608 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
609 		return ERR_PTR(-ENOMEM);
610 	}
611 
612 	cmd = &svq->scsi_cmds[tag];
613 	sg = cmd->tvc_sgl;
614 	prot_sg = cmd->tvc_prot_sgl;
615 	pages = cmd->tvc_upages;
616 	memset(cmd, 0, sizeof(*cmd));
617 	cmd->tvc_sgl = sg;
618 	cmd->tvc_prot_sgl = prot_sg;
619 	cmd->tvc_upages = pages;
620 	cmd->tvc_se_cmd.map_tag = tag;
621 	cmd->tvc_tag = scsi_tag;
622 	cmd->tvc_lun = lun;
623 	cmd->tvc_task_attr = task_attr;
624 	cmd->tvc_exp_data_len = exp_data_len;
625 	cmd->tvc_data_direction = data_direction;
626 	cmd->tvc_nexus = tv_nexus;
627 	cmd->inflight = vhost_scsi_get_inflight(vq);
628 
629 	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
630 
631 	return cmd;
632 }
633 
634 /*
635  * Map a user memory range into a scatterlist
636  *
637  * Returns the number of scatterlist entries used or -errno on error.
638  */
639 static int
640 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
641 		      struct iov_iter *iter,
642 		      struct scatterlist *sgl,
643 		      bool write)
644 {
645 	struct page **pages = cmd->tvc_upages;
646 	struct scatterlist *sg = sgl;
647 	ssize_t bytes;
648 	size_t offset;
649 	unsigned int npages = 0;
650 
651 	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
652 				VHOST_SCSI_PREALLOC_UPAGES, &offset);
653 	/* No pages were pinned */
654 	if (bytes <= 0)
655 		return bytes < 0 ? bytes : -EFAULT;
656 
657 	while (bytes) {
658 		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
659 		sg_set_page(sg++, pages[npages++], n, offset);
660 		bytes -= n;
661 		offset = 0;
662 	}
663 	return npages;
664 }
665 
666 static int
667 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
668 {
669 	int sgl_count = 0;
670 
671 	if (!iter || !iter->iov) {
672 		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
673 		       " present\n", __func__, bytes);
674 		return -EINVAL;
675 	}
676 
677 	sgl_count = iov_iter_npages(iter, 0xffff);
678 	if (sgl_count > max_sgls) {
679 		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
680 		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
681 		return -EINVAL;
682 	}
683 	return sgl_count;
684 }
685 
686 static int
687 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
688 		      struct iov_iter *iter,
689 		      struct scatterlist *sg, int sg_count)
690 {
691 	struct scatterlist *p = sg;
692 	int ret;
693 
694 	while (iov_iter_count(iter)) {
695 		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
696 		if (ret < 0) {
697 			while (p < sg) {
698 				struct page *page = sg_page(p++);
699 				if (page)
700 					put_page(page);
701 			}
702 			return ret;
703 		}
704 		sg += ret;
705 	}
706 	return 0;
707 }
708 
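/*
 * Map the optional T10-PI payload and the data payload from the guest
 * iovecs into the command's pre-allocated scatterlists, pinning the
 * backing user pages until vhost_scsi_release_cmd_res().
 */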
709 static int
710 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
711 		 size_t prot_bytes, struct iov_iter *prot_iter,
712 		 size_t data_bytes, struct iov_iter *data_iter)
713 {
714 	int sgl_count, ret;
715 	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
716 
717 	if (prot_bytes) {
718 		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
719 						 VHOST_SCSI_PREALLOC_PROT_SGLS);
720 		if (sgl_count < 0)
721 			return sgl_count;
722 
723 		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
724 		cmd->tvc_prot_sgl_count = sgl_count;
725 		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
726 			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
727 
728 		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
729 					    cmd->tvc_prot_sgl,
730 					    cmd->tvc_prot_sgl_count);
731 		if (ret < 0) {
732 			cmd->tvc_prot_sgl_count = 0;
733 			return ret;
734 		}
735 	}
736 	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
737 					 VHOST_SCSI_PREALLOC_SGLS);
738 	if (sgl_count < 0)
739 		return sgl_count;
740 
741 	sg_init_table(cmd->tvc_sgl, sgl_count);
742 	cmd->tvc_sgl_count = sgl_count;
743 	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
744 		  cmd->tvc_sgl, cmd->tvc_sgl_count);
745 
746 	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
747 				    cmd->tvc_sgl, cmd->tvc_sgl_count);
748 	if (ret < 0) {
749 		cmd->tvc_sgl_count = 0;
750 		return ret;
751 	}
752 	return 0;
753 }
754 
755 static int vhost_scsi_to_tcm_attr(int attr)
756 {
757 	switch (attr) {
758 	case VIRTIO_SCSI_S_SIMPLE:
759 		return TCM_SIMPLE_TAG;
760 	case VIRTIO_SCSI_S_ORDERED:
761 		return TCM_ORDERED_TAG;
762 	case VIRTIO_SCSI_S_HEAD:
763 		return TCM_HEAD_TAG;
764 	case VIRTIO_SCSI_S_ACA:
765 		return TCM_ACA_TAG;
766 	default:
767 		break;
768 	}
769 	return TCM_SIMPLE_TAG;
770 }
771 
772 static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
773 {
774 	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
775 	struct vhost_scsi_nexus *tv_nexus;
776 	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
777 
778 	/* FIXME: BIDI operation */
779 	if (cmd->tvc_sgl_count) {
780 		sg_ptr = cmd->tvc_sgl;
781 
782 		if (cmd->tvc_prot_sgl_count)
783 			sg_prot_ptr = cmd->tvc_prot_sgl;
784 		else
785 			se_cmd->prot_pto = true;
786 	} else {
787 		sg_ptr = NULL;
788 	}
789 	tv_nexus = cmd->tvc_nexus;
790 
791 	se_cmd->tag = 0;
792 	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
793 			cmd->tvc_lun, cmd->tvc_exp_data_len,
794 			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
795 			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
796 
797 	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
798 			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
799 			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
800 		return;
801 
802 	target_queue_submission(se_cmd);
803 }
804 
805 static void
806 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
807 			   struct vhost_virtqueue *vq,
808 			   int head, unsigned out)
809 {
810 	struct virtio_scsi_cmd_resp __user *resp;
811 	struct virtio_scsi_cmd_resp rsp;
812 	int ret;
813 
814 	memset(&rsp, 0, sizeof(rsp));
815 	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
816 	resp = vq->iov[out].iov_base;
817 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
818 	if (!ret)
819 		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
820 	else
821 		pr_err("Faulted on virtio_scsi_cmd_resp\n");
822 }
823 
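/*
 * Pull the next descriptor chain off the vq and point vc->out_iter at the
 * outgoing iovecs. Returns 0 on success, -ENXIO when there is nothing more
 * to handle (or on a ring error), and -EAGAIN when new buffers raced with
 * re-enabling notification and the caller should retry.
 */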
824 static int
825 vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
826 		    struct vhost_scsi_ctx *vc)
827 {
828 	int ret = -ENXIO;
829 
830 	vc->head = vhost_get_vq_desc(vq, vq->iov,
831 				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
832 				     NULL, NULL);
833 
834 	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
835 		 vc->head, vc->out, vc->in);
836 
837 	/* On error, stop handling until the next kick. */
838 	if (unlikely(vc->head < 0))
839 		goto done;
840 
841 	/* Nothing new?  Wait for eventfd to tell us they refilled. */
842 	if (vc->head == vq->num) {
843 		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
844 			vhost_disable_notify(&vs->dev, vq);
845 			ret = -EAGAIN;
846 		}
847 		goto done;
848 	}
849 
850 	/*
851 	 * Get the size of request and response buffers.
852 	 * FIXME: Not correct for BIDI operation
853 	 */
854 	vc->out_size = iov_length(vq->iov, vc->out);
855 	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
856 
857 	/*
858 	 * Copy over the virtio-scsi request header, which for an
859 	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
860 	 * single iovec may contain both the header + outgoing
861 	 * WRITE payloads.
862 	 *
863 	 * copy_from_iter() will advance out_iter, so that it will
864 	 * point at the start of the outgoing WRITE payload, if
865 	 * DMA_TO_DEVICE is set.
866 	 */
867 	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
868 	ret = 0;
869 
870 done:
871 	return ret;
872 }
873 
874 static int
875 vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
876 {
877 	if (unlikely(vc->in_size < vc->rsp_size)) {
878 		vq_err(vq,
879 		       "Response buf too small, need min %zu bytes got %zu",
880 		       vc->rsp_size, vc->in_size);
881 		return -EINVAL;
882 	} else if (unlikely(vc->out_size < vc->req_size)) {
883 		vq_err(vq,
884 		       "Request buf too small, need min %zu bytes got %zu",
885 		       vc->req_size, vc->out_size);
886 		return -EIO;
887 	}
888 
889 	return 0;
890 }
891 
892 static int
893 vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
894 		   struct vhost_scsi_tpg **tpgp)
895 {
896 	int ret = -EIO;
897 
898 	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
899 					  &vc->out_iter))) {
900 		vq_err(vq, "Faulted on copy_from_iter_full\n");
901 	} else if (unlikely(*vc->lunp != 1)) {
902 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
903 		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
904 	} else {
905 		struct vhost_scsi_tpg **vs_tpg, *tpg;
906 
907 		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */
908 
909 		tpg = READ_ONCE(vs_tpg[*vc->target]);
910 		if (unlikely(!tpg)) {
911 			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
912 		} else {
913 			if (tpgp)
914 				*tpgp = tpg;
915 			ret = 0;
916 		}
917 	}
918 
919 	return ret;
920 }
921 
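/*
 * Unpack the 14-bit LUN that virtio-scsi encodes in bytes 2-3 of its
 * 8-byte lun field (0x40 | lun_hi in byte 2, lun_lo in byte 3).
 */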
922 static u16 vhost_buf_to_lun(u8 *lun_buf)
923 {
924 	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
925 }
926 
927 static void
928 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
929 {
930 	struct vhost_scsi_tpg **vs_tpg, *tpg;
931 	struct virtio_scsi_cmd_req v_req;
932 	struct virtio_scsi_cmd_req_pi v_req_pi;
933 	struct vhost_scsi_ctx vc;
934 	struct vhost_scsi_cmd *cmd;
935 	struct iov_iter in_iter, prot_iter, data_iter;
936 	u64 tag;
937 	u32 exp_data_len, data_direction;
938 	int ret, prot_bytes, c = 0;
939 	u16 lun;
940 	u8 task_attr;
941 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
942 	void *cdb;
943 
944 	mutex_lock(&vq->mutex);
945 	/*
946 	 * We can handle the vq only after the endpoint has been set up by the
947 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
948 	 */
949 	vs_tpg = vhost_vq_get_backend(vq);
950 	if (!vs_tpg)
951 		goto out;
952 
953 	memset(&vc, 0, sizeof(vc));
954 	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
955 
956 	vhost_disable_notify(&vs->dev, vq);
957 
958 	do {
959 		ret = vhost_scsi_get_desc(vs, vq, &vc);
960 		if (ret)
961 			goto err;
962 
963 		/*
964 		 * Setup pointers and values based upon different virtio-scsi
965 		 * request header if T10_PI is enabled in KVM guest.
966 		 */
967 		if (t10_pi) {
968 			vc.req = &v_req_pi;
969 			vc.req_size = sizeof(v_req_pi);
970 			vc.lunp = &v_req_pi.lun[0];
971 			vc.target = &v_req_pi.lun[1];
972 		} else {
973 			vc.req = &v_req;
974 			vc.req_size = sizeof(v_req);
975 			vc.lunp = &v_req.lun[0];
976 			vc.target = &v_req.lun[1];
977 		}
978 
979 		/*
980 		 * Validate the size of request and response buffers.
981 		 * Check for a sane response buffer so we can report
982 		 * early errors back to the guest.
983 		 */
984 		ret = vhost_scsi_chk_size(vq, &vc);
985 		if (ret)
986 			goto err;
987 
988 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
989 		if (ret)
990 			goto err;
991 
992 		ret = -EIO;	/* bad target on any error from here on */
993 
994 		/*
995 		 * Determine data_direction by calculating the total outgoing
996 		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
997 		 * response headers respectively.
998 		 *
999 		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
1000 		 * to the right place.
1001 		 *
1002 		 * For DMA_FROM_DEVICE, the iovec will be just past the end
1003 		 * of the virtio-scsi response header in either the same
1004 		 * or immediately following iovec.
1005 		 *
1006 		 * Any associated T10_PI bytes for the outgoing / incoming
1007 		 * payloads are included in calculation of exp_data_len here.
1008 		 */
1009 		prot_bytes = 0;
1010 
1011 		if (vc.out_size > vc.req_size) {
1012 			data_direction = DMA_TO_DEVICE;
1013 			exp_data_len = vc.out_size - vc.req_size;
1014 			data_iter = vc.out_iter;
1015 		} else if (vc.in_size > vc.rsp_size) {
1016 			data_direction = DMA_FROM_DEVICE;
1017 			exp_data_len = vc.in_size - vc.rsp_size;
1018 
1019 			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
1020 				      vc.rsp_size + exp_data_len);
1021 			iov_iter_advance(&in_iter, vc.rsp_size);
1022 			data_iter = in_iter;
1023 		} else {
1024 			data_direction = DMA_NONE;
1025 			exp_data_len = 0;
1026 		}
1027 		/*
1028 		 * If T10_PI header + payload is present, setup prot_iter values
1029 		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
1030 		 * host scatterlists via iov_iter_get_pages2().
1031 		 */
1032 		if (t10_pi) {
1033 			if (v_req_pi.pi_bytesout) {
1034 				if (data_direction != DMA_TO_DEVICE) {
1035 					vq_err(vq, "Received non zero pi_bytesout,"
1036 						" but wrong data_direction\n");
1037 					goto err;
1038 				}
1039 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1040 			} else if (v_req_pi.pi_bytesin) {
1041 				if (data_direction != DMA_FROM_DEVICE) {
1042 					vq_err(vq, "Received non zero pi_bytesin,"
1043 						" but wrong data_direction\n");
1044 					goto err;
1045 				}
1046 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1047 			}
1048 			/*
1049 			 * Set prot_iter to data_iter and truncate it to
1050 			 * prot_bytes, and advance data_iter past any
1051 			 * preceding prot_bytes that may be present.
1052 			 *
1053 			 * Also fix up the exp_data_len to reflect only the
1054 			 * actual data payload length.
1055 			 */
1056 			if (prot_bytes) {
1057 				exp_data_len -= prot_bytes;
1058 				prot_iter = data_iter;
1059 				iov_iter_truncate(&prot_iter, prot_bytes);
1060 				iov_iter_advance(&data_iter, prot_bytes);
1061 			}
1062 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
1063 			task_attr = v_req_pi.task_attr;
1064 			cdb = &v_req_pi.cdb[0];
1065 			lun = vhost_buf_to_lun(v_req_pi.lun);
1066 		} else {
1067 			tag = vhost64_to_cpu(vq, v_req.tag);
1068 			task_attr = v_req.task_attr;
1069 			cdb = &v_req.cdb[0];
1070 			lun = vhost_buf_to_lun(v_req.lun);
1071 		}
1072 		/*
1073 		 * Check that the received CDB size does not exceed our
1074 		 * hardcoded max for vhost-scsi, then get a pre-allocated
1075 		 * cmd descriptor for the new virtio-scsi tag.
1076 		 *
1077 		 * TODO what if cdb was too small for varlen cdb header?
1078 		 */
1079 		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1080 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
1081 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1082 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1083 				goto err;
1084 		}
1085 		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
1086 					 exp_data_len + prot_bytes,
1087 					 data_direction);
1088 		if (IS_ERR(cmd)) {
1089 			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
1090 			       PTR_ERR(cmd));
1091 			goto err;
1092 		}
1093 		cmd->tvc_vhost = vs;
1094 		cmd->tvc_vq = vq;
1095 		cmd->tvc_resp_iov = vq->iov[vc.out];
1096 		cmd->tvc_in_iovs = vc.in;
1097 
1098 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1099 			 cmd->tvc_cdb[0], cmd->tvc_lun);
1100 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1101 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1102 
1103 		if (data_direction != DMA_NONE) {
1104 			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
1105 						      &prot_iter, exp_data_len,
1106 						      &data_iter))) {
1107 				vq_err(vq, "Failed to map iov to sgl\n");
1108 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
1109 				goto err;
1110 			}
1111 		}
1112 		/*
1113 		 * Save the descriptor from vhost_get_vq_desc() to be used to
1114 		 * complete the virtio-scsi request in TCM callback context via
1115 		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1116 		 */
1117 		cmd->tvc_vq_desc = vc.head;
1118 		vhost_scsi_target_queue_cmd(cmd);
1119 		ret = 0;
1120 err:
1121 		/*
1122 		 * ENXIO:  No more requests, or read error, wait for next kick
1123 		 * EINVAL: Invalid response buffer, drop the request
1124 		 * EIO:    Respond with bad target
1125 		 * EAGAIN: Pending request
1126 		 */
1127 		if (ret == -ENXIO)
1128 			break;
1129 		else if (ret == -EIO)
1130 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1131 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1132 out:
1133 	mutex_unlock(&vq->mutex);
1134 }
1135 
1136 static void
1137 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1138 			 int in_iovs, int vq_desc, struct iovec *resp_iov,
1139 			 int tmf_resp_code)
1140 {
1141 	struct virtio_scsi_ctrl_tmf_resp rsp;
1142 	struct iov_iter iov_iter;
1143 	int ret;
1144 
1145 	pr_debug("%s\n", __func__);
1146 	memset(&rsp, 0, sizeof(rsp));
1147 	rsp.response = tmf_resp_code;
1148 
1149 	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));
1150 
1151 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1152 	if (likely(ret == sizeof(rsp)))
1153 		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
1154 	else
1155 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
1156 }
1157 
1158 static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
1159 {
1160 	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
1161 						  vwork);
1162 	int resp_code;
1163 
1164 	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
1165 		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
1166 	else
1167 		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1168 
1169 	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
1170 				 tmf->vq_desc, &tmf->resp_iov, resp_code);
1171 	vhost_scsi_release_tmf_res(tmf);
1172 }
1173 
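/*
 * Handle a LUN RESET by claiming the TMF descriptor reserved on the
 * tpg's tmf_queue; any other task management function is rejected.
 */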
1174 static void
1175 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1176 		      struct vhost_virtqueue *vq,
1177 		      struct virtio_scsi_ctrl_tmf_req *vtmf,
1178 		      struct vhost_scsi_ctx *vc)
1179 {
1180 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1181 					struct vhost_scsi_virtqueue, vq);
1182 	struct vhost_scsi_tmf *tmf;
1183 
1184 	if (vhost32_to_cpu(vq, vtmf->subtype) !=
1185 	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
1186 		goto send_reject;
1187 
1188 	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1189 		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
1190 		goto send_reject;
1191 	}
1192 
1193 	mutex_lock(&tpg->tv_tpg_mutex);
1194 	if (list_empty(&tpg->tmf_queue)) {
1195 		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
1196 		mutex_unlock(&tpg->tv_tpg_mutex);
1197 		goto send_reject;
1198 	}
1199 
1200 	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
1201 			       queue_entry);
1202 	list_del_init(&tmf->queue_entry);
1203 	mutex_unlock(&tpg->tv_tpg_mutex);
1204 
1205 	tmf->tpg = tpg;
1206 	tmf->vhost = vs;
1207 	tmf->svq = svq;
1208 	tmf->resp_iov = vq->iov[vc->out];
1209 	tmf->vq_desc = vc->head;
1210 	tmf->in_iovs = vc->in;
1211 	tmf->inflight = vhost_scsi_get_inflight(vq);
1212 
1213 	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1214 			      vhost_buf_to_lun(vtmf->lun), NULL,
1215 			      TMR_LUN_RESET, GFP_KERNEL, 0,
1216 			      TARGET_SCF_ACK_KREF) < 0) {
1217 		vhost_scsi_release_tmf_res(tmf);
1218 		goto send_reject;
1219 	}
1220 
1221 	return;
1222 
1223 send_reject:
1224 	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
1225 				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
1226 }
1227 
1228 static void
1229 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1230 			struct vhost_virtqueue *vq,
1231 			struct vhost_scsi_ctx *vc)
1232 {
1233 	struct virtio_scsi_ctrl_an_resp rsp;
1234 	struct iov_iter iov_iter;
1235 	int ret;
1236 
1237 	pr_debug("%s\n", __func__);
1238 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
1239 	rsp.response = VIRTIO_SCSI_S_OK;
1240 
1241 	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));
1242 
1243 	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1244 	if (likely(ret == sizeof(rsp)))
1245 		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1246 	else
1247 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
1248 }
1249 
1250 static void
1251 vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1252 {
1253 	struct vhost_scsi_tpg *tpg;
1254 	union {
1255 		__virtio32 type;
1256 		struct virtio_scsi_ctrl_an_req an;
1257 		struct virtio_scsi_ctrl_tmf_req tmf;
1258 	} v_req;
1259 	struct vhost_scsi_ctx vc;
1260 	size_t typ_size;
1261 	int ret, c = 0;
1262 
1263 	mutex_lock(&vq->mutex);
1264 	/*
1265 	 * We can handle the vq only after the endpoint has been set up by the
1266 	 * VHOST_SCSI_SET_ENDPOINT ioctl.
1267 	 */
1268 	if (!vhost_vq_get_backend(vq))
1269 		goto out;
1270 
1271 	memset(&vc, 0, sizeof(vc));
1272 
1273 	vhost_disable_notify(&vs->dev, vq);
1274 
1275 	do {
1276 		ret = vhost_scsi_get_desc(vs, vq, &vc);
1277 		if (ret)
1278 			goto err;
1279 
1280 		/*
1281 		 * Get the request type first in order to setup
1282 		 * other parameters dependent on the type.
1283 		 */
1284 		vc.req = &v_req.type;
1285 		typ_size = sizeof(v_req.type);
1286 
1287 		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
1288 						  &vc.out_iter))) {
1289 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
1290 			/*
1291 			 * The size of the response buffer depends on the
1292 			 * request type and must be validated against it.
1293 			 * Since the request type is not known, don't send
1294 			 * a response.
1295 			 */
1296 			continue;
1297 		}
1298 
1299 		switch (vhost32_to_cpu(vq, v_req.type)) {
1300 		case VIRTIO_SCSI_T_TMF:
1301 			vc.req = &v_req.tmf;
1302 			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1303 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
1304 			vc.lunp = &v_req.tmf.lun[0];
1305 			vc.target = &v_req.tmf.lun[1];
1306 			break;
1307 		case VIRTIO_SCSI_T_AN_QUERY:
1308 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
1309 			vc.req = &v_req.an;
1310 			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
1311 			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
1312 			vc.lunp = &v_req.an.lun[0];
1313 			vc.target = NULL;
1314 			break;
1315 		default:
1316 			vq_err(vq, "Unknown control request %d", v_req.type);
1317 			continue;
1318 		}
1319 
1320 		/*
1321 		 * Validate the size of request and response buffers.
1322 		 * Check for a sane response buffer so we can report
1323 		 * early errors back to the guest.
1324 		 */
1325 		ret = vhost_scsi_chk_size(vq, &vc);
1326 		if (ret)
1327 			goto err;
1328 
1329 		/*
1330 		 * Get the rest of the request now that its size is known.
1331 		 */
1332 		vc.req += typ_size;
1333 		vc.req_size -= typ_size;
1334 
1335 		ret = vhost_scsi_get_req(vq, &vc, &tpg);
1336 		if (ret)
1337 			goto err;
1338 
1339 		if (v_req.type == VIRTIO_SCSI_T_TMF)
1340 			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1341 		else
1342 			vhost_scsi_send_an_resp(vs, vq, &vc);
1343 err:
1344 		/*
1345 		 * ENXIO:  No more requests, or read error, wait for next kick
1346 		 * EINVAL: Invalid response buffer, drop the request
1347 		 * EIO:    Respond with bad target
1348 		 * EAGAIN: Pending request
1349 		 */
1350 		if (ret == -ENXIO)
1351 			break;
1352 		else if (ret == -EIO)
1353 			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
1354 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1355 out:
1356 	mutex_unlock(&vq->mutex);
1357 }
1358 
1359 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1360 {
1361 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1362 						poll.work);
1363 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1364 
1365 	pr_debug("%s: Handling kick for the control queue.\n", __func__);
1366 	vhost_scsi_ctl_handle_vq(vs, vq);
1367 }
1368 
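/*
 * Queue a hotplug/async event for the guest. The event is only built and
 * queued here; the actual vring update runs from vhost_scsi_evt_work() in
 * vhost worker context.
 */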
1369 static void
1370 vhost_scsi_send_evt(struct vhost_scsi *vs,
1371 		   struct vhost_scsi_tpg *tpg,
1372 		   struct se_lun *lun,
1373 		   u32 event,
1374 		   u32 reason)
1375 {
1376 	struct vhost_scsi_evt *evt;
1377 
1378 	evt = vhost_scsi_allocate_evt(vs, event, reason);
1379 	if (!evt)
1380 		return;
1381 
1382 	if (tpg && lun) {
1383 		/* TODO: share lun setup code with virtio-scsi.ko */
1384 		/*
1385 		 * Note: evt->event is zeroed when we allocate it and
1386 		 * lun[4-7] need to be zero according to virtio-scsi spec.
1387 		 */
1388 		evt->event.lun[0] = 0x01;
1389 		evt->event.lun[1] = tpg->tport_tpgt;
1390 		if (lun->unpacked_lun >= 256)
1391 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1392 		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1393 	}
1394 
1395 	llist_add(&evt->list, &vs->vs_event_list);
1396 	vhost_work_queue(&vs->dev, &vs->vs_event_work);
1397 }
1398 
1399 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1400 {
1401 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1402 						poll.work);
1403 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1404 
1405 	mutex_lock(&vq->mutex);
1406 	if (!vhost_vq_get_backend(vq))
1407 		goto out;
1408 
1409 	if (vs->vs_events_missed)
1410 		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1411 out:
1412 	mutex_unlock(&vq->mutex);
1413 }
1414 
1415 static void vhost_scsi_handle_kick(struct vhost_work *work)
1416 {
1417 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1418 						poll.work);
1419 	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1420 
1421 	vhost_scsi_handle_vq(vs, vq);
1422 }
1423 
1424 /* Callers must hold dev mutex */
1425 static void vhost_scsi_flush(struct vhost_scsi *vs)
1426 {
1427 	int i;
1428 
1429 	/* Init new inflight and remember the old inflight */
1430 	vhost_scsi_init_inflight(vs, vs->old_inflight);
1431 
1432 	/*
1433 	 * The inflight->kref was initialized to 1. We decrement it here to
1434 	 * indicate the start of the flush operation so that it will reach 0
1435 	 * when all the reqs are finished.
1436 	 */
1437 	for (i = 0; i < vs->dev.nvqs; i++)
1438 		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1439 
1440 	/* Flush both the vhost poll and vhost work */
1441 	vhost_dev_flush(&vs->dev);
1442 
1443 	/* Wait for all reqs issued before the flush to be finished */
1444 	for (i = 0; i < vs->dev.nvqs; i++)
1445 		wait_for_completion(&vs->old_inflight[i]->comp);
1446 }
1447 
1448 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1449 {
1450 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1451 					struct vhost_scsi_virtqueue, vq);
1452 	struct vhost_scsi_cmd *tv_cmd;
1453 	unsigned int i;
1454 
1455 	if (!svq->scsi_cmds)
1456 		return;
1457 
1458 	for (i = 0; i < svq->max_cmds; i++) {
1459 		tv_cmd = &svq->scsi_cmds[i];
1460 
1461 		kfree(tv_cmd->tvc_sgl);
1462 		kfree(tv_cmd->tvc_prot_sgl);
1463 		kfree(tv_cmd->tvc_upages);
1464 	}
1465 
1466 	sbitmap_free(&svq->scsi_tags);
1467 	kfree(svq->scsi_cmds);
1468 	svq->scsi_cmds = NULL;
1469 }
1470 
1471 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1472 {
1473 	struct vhost_scsi_virtqueue *svq = container_of(vq,
1474 					struct vhost_scsi_virtqueue, vq);
1475 	struct vhost_scsi_cmd *tv_cmd;
1476 	unsigned int i;
1477 
1478 	if (svq->scsi_cmds)
1479 		return 0;
1480 
1481 	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1482 			      NUMA_NO_NODE, false, true))
1483 		return -ENOMEM;
1484 	svq->max_cmds = max_cmds;
1485 
1486 	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1487 	if (!svq->scsi_cmds) {
1488 		sbitmap_free(&svq->scsi_tags);
1489 		return -ENOMEM;
1490 	}
1491 
1492 	for (i = 0; i < max_cmds; i++) {
1493 		tv_cmd = &svq->scsi_cmds[i];
1494 
1495 		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
1496 					  sizeof(struct scatterlist),
1497 					  GFP_KERNEL);
1498 		if (!tv_cmd->tvc_sgl) {
1499 			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1500 			goto out;
1501 		}
1502 
1503 		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
1504 					     sizeof(struct page *),
1505 					     GFP_KERNEL);
1506 		if (!tv_cmd->tvc_upages) {
1507 			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1508 			goto out;
1509 		}
1510 
1511 		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
1512 					       sizeof(struct scatterlist),
1513 					       GFP_KERNEL);
1514 		if (!tv_cmd->tvc_prot_sgl) {
1515 			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1516 			goto out;
1517 		}
1518 	}
1519 	return 0;
1520 out:
1521 	vhost_scsi_destroy_vq_cmds(vq);
1522 	return -ENOMEM;
1523 }
1524 
1525 /*
1526  * Called from vhost_scsi_ioctl() context to walk the list of available
1527  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1528  *
1529  *  The lock nesting rule is:
1530  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1531  */
1532 static int
1533 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1534 			struct vhost_scsi_target *t)
1535 {
1536 	struct se_portal_group *se_tpg;
1537 	struct vhost_scsi_tport *tv_tport;
1538 	struct vhost_scsi_tpg *tpg;
1539 	struct vhost_scsi_tpg **vs_tpg;
1540 	struct vhost_virtqueue *vq;
1541 	int index, ret, i, len;
1542 	bool match = false;
1543 
1544 	mutex_lock(&vhost_scsi_mutex);
1545 	mutex_lock(&vs->dev.mutex);
1546 
1547 	/* Verify that the rings have been set up correctly. */
1548 	for (index = 0; index < vs->dev.nvqs; ++index) {
1550 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1551 			ret = -EFAULT;
1552 			goto out;
1553 		}
1554 	}
1555 
1556 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1557 	vs_tpg = kzalloc(len, GFP_KERNEL);
1558 	if (!vs_tpg) {
1559 		ret = -ENOMEM;
1560 		goto out;
1561 	}
1562 	if (vs->vs_tpg)
1563 		memcpy(vs_tpg, vs->vs_tpg, len);
1564 
1565 	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1566 		mutex_lock(&tpg->tv_tpg_mutex);
1567 		if (!tpg->tpg_nexus) {
1568 			mutex_unlock(&tpg->tv_tpg_mutex);
1569 			continue;
1570 		}
1571 		if (tpg->tv_tpg_vhost_count != 0) {
1572 			mutex_unlock(&tpg->tv_tpg_mutex);
1573 			continue;
1574 		}
1575 		tv_tport = tpg->tport;
1576 
1577 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1578 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1579 				mutex_unlock(&tpg->tv_tpg_mutex);
1580 				ret = -EEXIST;
1581 				goto undepend;
1582 			}
1583 			/*
1584 			 * In order to ensure individual vhost-scsi configfs
1585 			 * groups cannot be removed while in use by vhost ioctl,
1586 			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1587 			 * dependency now.
1588 			 */
1589 			se_tpg = &tpg->se_tpg;
1590 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1591 			if (ret) {
1592 				pr_warn("target_depend_item() failed: %d\n", ret);
1593 				mutex_unlock(&tpg->tv_tpg_mutex);
1594 				goto undepend;
1595 			}
1596 			tpg->tv_tpg_vhost_count++;
1597 			tpg->vhost_scsi = vs;
1598 			vs_tpg[tpg->tport_tpgt] = tpg;
1599 			match = true;
1600 		}
1601 		mutex_unlock(&tpg->tv_tpg_mutex);
1602 	}
1603 
1604 	if (match) {
1605 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1606 		       sizeof(vs->vs_vhost_wwpn));
1607 
1608 		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
1609 			vq = &vs->vqs[i].vq;
1610 			if (!vhost_vq_is_setup(vq))
1611 				continue;
1612 
1613 			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
1614 			if (ret)
1615 				goto destroy_vq_cmds;
1616 		}
1617 
1618 		for (i = 0; i < vs->dev.nvqs; i++) {
1619 			vq = &vs->vqs[i].vq;
1620 			mutex_lock(&vq->mutex);
1621 			vhost_vq_set_backend(vq, vs_tpg);
1622 			vhost_vq_init_access(vq);
1623 			mutex_unlock(&vq->mutex);
1624 		}
1625 		ret = 0;
1626 	} else {
1627 		ret = -EEXIST;
1628 	}
1629 
1630 	/*
1631 	 * Act as synchronize_rcu to make sure access to
1632 	 * old vs->vs_tpg is finished.
1633 	 */
1634 	vhost_scsi_flush(vs);
1635 	kfree(vs->vs_tpg);
1636 	vs->vs_tpg = vs_tpg;
1637 	goto out;
1638 
1639 destroy_vq_cmds:
1640 	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
1641 		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
1642 			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
1643 	}
1644 undepend:
1645 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1646 		tpg = vs_tpg[i];
1647 		if (tpg) {
1648 			tpg->tv_tpg_vhost_count--;
1649 			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
1650 		}
1651 	}
1652 	kfree(vs_tpg);
1653 out:
1654 	mutex_unlock(&vs->dev.mutex);
1655 	mutex_unlock(&vhost_scsi_mutex);
1656 	return ret;
1657 }
1658 
1659 static int
1660 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1661 			  struct vhost_scsi_target *t)
1662 {
1663 	struct se_portal_group *se_tpg;
1664 	struct vhost_scsi_tport *tv_tport;
1665 	struct vhost_scsi_tpg *tpg;
1666 	struct vhost_virtqueue *vq;
1667 	bool match = false;
1668 	int index, ret, i;
1669 	u8 target;
1670 
1671 	mutex_lock(&vhost_scsi_mutex);
1672 	mutex_lock(&vs->dev.mutex);
1673 	/* Verify that the rings have been set up correctly. */
1674 	for (index = 0; index < vs->dev.nvqs; ++index) {
1675 		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1676 			ret = -EFAULT;
1677 			goto err_dev;
1678 		}
1679 	}
1680 
1681 	if (!vs->vs_tpg) {
1682 		ret = 0;
1683 		goto err_dev;
1684 	}
1685 
1686 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1687 		target = i;
1688 		tpg = vs->vs_tpg[target];
1689 		if (!tpg)
1690 			continue;
1691 
1692 		mutex_lock(&tpg->tv_tpg_mutex);
1693 		tv_tport = tpg->tport;
1694 		if (!tv_tport) {
1695 			ret = -ENODEV;
1696 			goto err_tpg;
1697 		}
1698 
1699 		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1700 			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1701 				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1702 				tv_tport->tport_name, tpg->tport_tpgt,
1703 				t->vhost_wwpn, t->vhost_tpgt);
1704 			ret = -EINVAL;
1705 			goto err_tpg;
1706 		}
1707 		tpg->tv_tpg_vhost_count--;
1708 		tpg->vhost_scsi = NULL;
1709 		vs->vs_tpg[target] = NULL;
1710 		match = true;
1711 		mutex_unlock(&tpg->tv_tpg_mutex);
1712 		/*
1713 		 * Release se_tpg->tpg_group.cg_item configfs dependency now
1714 		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1715 		 */
1716 		se_tpg = &tpg->se_tpg;
1717 		target_undepend_item(&se_tpg->tpg_group.cg_item);
1718 	}
1719 	if (match) {
1720 		for (i = 0; i < vs->dev.nvqs; i++) {
1721 			vq = &vs->vqs[i].vq;
1722 			mutex_lock(&vq->mutex);
1723 			vhost_vq_set_backend(vq, NULL);
1724 			mutex_unlock(&vq->mutex);
1725 		}
1726 		/* Make sure cmds are not running before tearing them down. */
1727 		vhost_scsi_flush(vs);
1728 
1729 		for (i = 0; i < vs->dev.nvqs; i++) {
1730 			vq = &vs->vqs[i].vq;
1731 			vhost_scsi_destroy_vq_cmds(vq);
1732 		}
1733 	}
1734 	/*
1735 	 * Act as synchronize_rcu to make sure access to
1736 	 * old vs->vs_tpg is finished.
1737 	 */
1738 	vhost_scsi_flush(vs);
1739 	kfree(vs->vs_tpg);
1740 	vs->vs_tpg = NULL;
1741 	WARN_ON(vs->vs_events_nr);
1742 	mutex_unlock(&vs->dev.mutex);
1743 	mutex_unlock(&vhost_scsi_mutex);
1744 	return 0;
1745 
1746 err_tpg:
1747 	mutex_unlock(&tpg->tv_tpg_mutex);
1748 err_dev:
1749 	mutex_unlock(&vs->dev.mutex);
1750 	mutex_unlock(&vhost_scsi_mutex);
1751 	return ret;
1752 }
1753 
1754 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1755 {
1756 	struct vhost_virtqueue *vq;
1757 	int i;
1758 
1759 	if (features & ~VHOST_SCSI_FEATURES)
1760 		return -EOPNOTSUPP;
1761 
1762 	mutex_lock(&vs->dev.mutex);
1763 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1764 	    !vhost_log_access_ok(&vs->dev)) {
1765 		mutex_unlock(&vs->dev.mutex);
1766 		return -EFAULT;
1767 	}
1768 
1769 	for (i = 0; i < vs->dev.nvqs; i++) {
1770 		vq = &vs->vqs[i].vq;
1771 		mutex_lock(&vq->mutex);
1772 		vq->acked_features = features;
1773 		mutex_unlock(&vq->mutex);
1774 	}
1775 	mutex_unlock(&vs->dev.mutex);
1776 	return 0;
1777 }
1778 
1779 static int vhost_scsi_open(struct inode *inode, struct file *f)
1780 {
1781 	struct vhost_scsi *vs;
1782 	struct vhost_virtqueue **vqs;
1783 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
1784 
1785 	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1786 	if (!vs)
1787 		goto err_vs;
1788 
1789 	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
1790 		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
1791 		       VHOST_SCSI_MAX_IO_VQ);
1792 		nvqs = VHOST_SCSI_MAX_IO_VQ;
1793 	} else if (nvqs == 0) {
1794 		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
1795 		nvqs = 1;
1796 	}
1797 	nvqs += VHOST_SCSI_VQ_IO;
1798 
1799 	vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
1800 	if (!vs->compl_bitmap)
1801 		goto err_compl_bitmap;
1802 
1803 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1804 					 GFP_KERNEL | __GFP_ZERO);
1805 	if (!vs->old_inflight)
1806 		goto err_inflight;
1807 
1808 	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1809 				GFP_KERNEL | __GFP_ZERO);
1810 	if (!vs->vqs)
1811 		goto err_vqs;
1812 
1813 	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1814 	if (!vqs)
1815 		goto err_local_vqs;
1816 
1817 	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1818 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1819 
1820 	vs->vs_events_nr = 0;
1821 	vs->vs_events_missed = false;
1822 
1823 	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1824 	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1825 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1826 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1827 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
1828 		vqs[i] = &vs->vqs[i].vq;
1829 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1830 	}
1831 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
1832 		       VHOST_SCSI_WEIGHT, 0, true, NULL);
1833 
1834 	vhost_scsi_init_inflight(vs, NULL);
1835 
1836 	f->private_data = vs;
1837 	return 0;
1838 
1839 err_local_vqs:
1840 	kfree(vs->vqs);
1841 err_vqs:
1842 	kfree(vs->old_inflight);
1843 err_inflight:
1844 	bitmap_free(vs->compl_bitmap);
1845 err_compl_bitmap:
1846 	kvfree(vs);
1847 err_vs:
1848 	return r;
1849 }
1850 
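/*
 * Final release of the device fd: clear the endpoint using the WWPN
 * saved at VHOST_SCSI_SET_ENDPOINT time, stop and clean up the
 * underlying vhost device, then free everything vhost_scsi_open()
 * allocated.
 */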
1851 static int vhost_scsi_release(struct inode *inode, struct file *f)
1852 {
1853 	struct vhost_scsi *vs = f->private_data;
1854 	struct vhost_scsi_target t;
1855 
1856 	mutex_lock(&vs->dev.mutex);
1857 	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1858 	mutex_unlock(&vs->dev.mutex);
1859 	vhost_scsi_clear_endpoint(vs, &t);
1860 	vhost_dev_stop(&vs->dev);
1861 	vhost_dev_cleanup(&vs->dev);
1862 	kfree(vs->dev.vqs);
1863 	kfree(vs->vqs);
1864 	kfree(vs->old_inflight);
1865 	bitmap_free(vs->compl_bitmap);
1866 	kvfree(vs);
1867 	return 0;
1868 }
1869 
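/*
 * Device ioctl multiplexer.  vhost-scsi specific commands are handled
 * here; everything else falls through to the generic vhost device and
 * vring ioctls.
 *
 * A minimal userspace sketch (illustrative only; error handling and
 * the VHOST_SET_VRING_* setup are elided, and the WWPN is a
 * placeholder):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int example_bind_vhost_scsi(void)
 *	{
 *		struct vhost_scsi_target t = { 0 };
 *		__u64 features;
 *		int fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *		ioctl(fd, VHOST_SET_OWNER);
 *		ioctl(fd, VHOST_GET_FEATURES, &features);
 *		ioctl(fd, VHOST_SET_FEATURES, &features);
 *		strncpy(t.vhost_wwpn, "naa.500140512345678a",
 *			sizeof(t.vhost_wwpn) - 1);
 *		return ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *	}
 */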
1870 static long
1871 vhost_scsi_ioctl(struct file *f,
1872 		 unsigned int ioctl,
1873 		 unsigned long arg)
1874 {
1875 	struct vhost_scsi *vs = f->private_data;
1876 	struct vhost_scsi_target backend;
1877 	void __user *argp = (void __user *)arg;
1878 	u64 __user *featurep = argp;
1879 	u32 __user *eventsp = argp;
1880 	u32 events_missed;
1881 	u64 features;
1882 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
1883 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1884 
1885 	switch (ioctl) {
1886 	case VHOST_SCSI_SET_ENDPOINT:
1887 		if (copy_from_user(&backend, argp, sizeof(backend)))
1888 			return -EFAULT;
1889 		if (backend.reserved != 0)
1890 			return -EOPNOTSUPP;
1891 
1892 		return vhost_scsi_set_endpoint(vs, &backend);
1893 	case VHOST_SCSI_CLEAR_ENDPOINT:
1894 		if (copy_from_user(&backend, argp, sizeof(backend)))
1895 			return -EFAULT;
1896 		if (backend.reserved != 0)
1897 			return -EOPNOTSUPP;
1898 
1899 		return vhost_scsi_clear_endpoint(vs, &backend);
1900 	case VHOST_SCSI_GET_ABI_VERSION:
1901 		if (copy_to_user(argp, &abi_version, sizeof(abi_version)))
1902 			return -EFAULT;
1903 		return 0;
1904 	case VHOST_SCSI_SET_EVENTS_MISSED:
1905 		if (get_user(events_missed, eventsp))
1906 			return -EFAULT;
1907 		mutex_lock(&vq->mutex);
1908 		vs->vs_events_missed = events_missed;
1909 		mutex_unlock(&vq->mutex);
1910 		return 0;
1911 	case VHOST_SCSI_GET_EVENTS_MISSED:
1912 		mutex_lock(&vq->mutex);
1913 		events_missed = vs->vs_events_missed;
1914 		mutex_unlock(&vq->mutex);
1915 		if (put_user(events_missed, eventsp))
1916 			return -EFAULT;
1917 		return 0;
1918 	case VHOST_GET_FEATURES:
1919 		features = VHOST_SCSI_FEATURES;
1920 		if (copy_to_user(featurep, &features, sizeof(features)))
1921 			return -EFAULT;
1922 		return 0;
1923 	case VHOST_SET_FEATURES:
1924 		if (copy_from_user(&features, featurep, sizeof(features)))
1925 			return -EFAULT;
1926 		return vhost_scsi_set_features(vs, features);
1927 	default:
1928 		mutex_lock(&vs->dev.mutex);
1929 		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1930 		/* TODO: flush backend after dev ioctl. */
1931 		if (r == -ENOIOCTLCMD)
1932 			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1933 		mutex_unlock(&vs->dev.mutex);
1934 		return r;
1935 	}
1936 }
1937 
1938 static const struct file_operations vhost_scsi_fops = {
1939 	.owner          = THIS_MODULE,
1940 	.release        = vhost_scsi_release,
1941 	.unlocked_ioctl = vhost_scsi_ioctl,
1942 	.compat_ioctl	= compat_ptr_ioctl,
1943 	.open           = vhost_scsi_open,
1944 	.llseek		= noop_llseek,
1945 };
1946 
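/*
 * The misc device behind /dev/vhost-scsi; the minor number is assigned
 * dynamically at registration time.
 */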
1947 static struct miscdevice vhost_scsi_misc = {
1948 	MISC_DYNAMIC_MINOR,
1949 	"vhost-scsi",
1950 	&vhost_scsi_fops,
1951 };
1952 
1953 static int __init vhost_scsi_register(void)
1954 {
1955 	return misc_register(&vhost_scsi_misc);
1956 }
1957 
1958 static void vhost_scsi_deregister(void)
1959 {
1960 	misc_deregister(&vhost_scsi_misc);
1961 }
1962 
1963 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1964 {
1965 	switch (tport->tport_proto_id) {
1966 	case SCSI_PROTOCOL_SAS:
1967 		return "SAS";
1968 	case SCSI_PROTOCOL_FCP:
1969 		return "FCP";
1970 	case SCSI_PROTOCOL_ISCSI:
1971 		return "iSCSI";
1972 	default:
1973 		break;
1974 	}
1975 
1976 	return "Unknown";
1977 }
1978 
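/*
 * Report a LUN hotplug (plug == true) or hot-unplug to the guest as a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event.  The event is only queued when
 * the guest has negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise it is
 * silently dropped.
 */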
1979 static void
1980 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1981 		  struct se_lun *lun, bool plug)
1982 {
1984 	struct vhost_scsi *vs = tpg->vhost_scsi;
1985 	struct vhost_virtqueue *vq;
1986 	u32 reason;
1987 
1988 	if (!vs)
1989 		return;
1990 
1991 	mutex_lock(&vs->dev.mutex);
1992 
1993 	if (plug)
1994 		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1995 	else
1996 		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1997 
1998 	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1999 	mutex_lock(&vq->mutex);
2000 	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2001 		vhost_scsi_send_evt(vs, tpg, lun,
2002 				   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2003 	mutex_unlock(&vq->mutex);
2004 	mutex_unlock(&vs->dev.mutex);
2005 }
2006 
2007 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2008 {
2009 	vhost_scsi_do_plug(tpg, lun, true);
2010 }
2011 
2012 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2013 {
2014 	vhost_scsi_do_plug(tpg, lun, false);
2015 }
2016 
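/*
 * configfs callback for linking a LUN into this TPG.  A TMF context is
 * preallocated per port so that later task-management requests can be
 * serviced without allocating memory, and the guest is then notified
 * of the new LUN.
 */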
2017 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2018 			       struct se_lun *lun)
2019 {
2020 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2021 				struct vhost_scsi_tpg, se_tpg);
2022 	struct vhost_scsi_tmf *tmf;
2023 
2024 	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
2025 	if (!tmf)
2026 		return -ENOMEM;
2027 	INIT_LIST_HEAD(&tmf->queue_entry);
2028 	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
2029 
2030 	mutex_lock(&vhost_scsi_mutex);
2031 
2032 	mutex_lock(&tpg->tv_tpg_mutex);
2033 	tpg->tv_tpg_port_count++;
2034 	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
2035 	mutex_unlock(&tpg->tv_tpg_mutex);
2036 
2037 	vhost_scsi_hotplug(tpg, lun);
2038 
2039 	mutex_unlock(&vhost_scsi_mutex);
2040 
2041 	return 0;
2042 }
2043 
2044 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2045 				  struct se_lun *lun)
2046 {
2047 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2048 				struct vhost_scsi_tpg, se_tpg);
2049 	struct vhost_scsi_tmf *tmf;
2050 
2051 	mutex_lock(&vhost_scsi_mutex);
2052 
2053 	mutex_lock(&tpg->tv_tpg_mutex);
2054 	tpg->tv_tpg_port_count--;
2055 	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
2056 			       queue_entry);
2057 	list_del(&tmf->queue_entry);
2058 	kfree(tmf);
2059 	mutex_unlock(&tpg->tv_tpg_mutex);
2060 
2061 	vhost_scsi_hotunplug(tpg, lun);
2062 
2063 	mutex_unlock(&vhost_scsi_mutex);
2064 }
2065 
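/*
 * The fabric_prot_type attribute selects the T10-PI protection the
 * fabric itself provides: 0 disables it, while 1 and 3 select DIF
 * Type 1 and Type 3 respectively (Type 2 is not supported).
 */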
2066 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2067 		struct config_item *item, const char *page, size_t count)
2068 {
2069 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2070 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2071 				struct vhost_scsi_tpg, se_tpg);
2072 	unsigned long val;
2073 	int ret = kstrtoul(page, 0, &val);
2074 
2075 	if (ret) {
2076 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2077 		return ret;
2078 	}
2079 	if (val != 0 && val != 1 && val != 3) {
2080 		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2081 		return -EINVAL;
2082 	}
2083 	tpg->tv_fabric_prot_type = val;
2084 
2085 	return count;
2086 }
2087 
2088 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2089 		struct config_item *item, char *page)
2090 {
2091 	struct se_portal_group *se_tpg = attrib_to_tpg(item);
2092 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2093 				struct vhost_scsi_tpg, se_tpg);
2094 
2095 	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
2096 }
2097 
2098 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2099 
2100 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2101 	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2102 	NULL,
2103 };
2104 
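/*
 * Create the I_T nexus for this TPG, written via the "nexus" configfs
 * attribute.  Only one nexus may exist per TPG at a time.
 */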
2105 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2106 				const char *name)
2107 {
2108 	struct vhost_scsi_nexus *tv_nexus;
2109 
2110 	mutex_lock(&tpg->tv_tpg_mutex);
2111 	if (tpg->tpg_nexus) {
2112 		mutex_unlock(&tpg->tv_tpg_mutex);
2113 		pr_debug("tpg->tpg_nexus already exists\n");
2114 		return -EEXIST;
2115 	}
2116 
2117 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2118 	if (!tv_nexus) {
2119 		mutex_unlock(&tpg->tv_tpg_mutex);
2120 		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2121 		return -ENOMEM;
2122 	}
2123 	/*
2124 	 * Since we are running in 'demo mode' this call will generate a
2125 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2126 	 * the SCSI Initiator port name of the passed configfs group 'name'.
2127 	 */
2128 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2129 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2130 					(unsigned char *)name, tv_nexus, NULL);
2131 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
2132 		mutex_unlock(&tpg->tv_tpg_mutex);
2133 		kfree(tv_nexus);
2134 		return -ENOMEM;
2135 	}
2136 	tpg->tpg_nexus = tv_nexus;
2137 
2138 	mutex_unlock(&tpg->tv_tpg_mutex);
2139 	return 0;
2140 }
2141 
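/*
 * Tear down the TPG's I_T nexus.  This is refused while any port is
 * still linked or while a vhost device still holds the endpoint.
 */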
2142 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2143 {
2144 	struct se_session *se_sess;
2145 	struct vhost_scsi_nexus *tv_nexus;
2146 
2147 	mutex_lock(&tpg->tv_tpg_mutex);
2148 	tv_nexus = tpg->tpg_nexus;
2149 	if (!tv_nexus) {
2150 		mutex_unlock(&tpg->tv_tpg_mutex);
2151 		return -ENODEV;
2152 	}
2153 
2154 	se_sess = tv_nexus->tvn_se_sess;
2155 	if (!se_sess) {
2156 		mutex_unlock(&tpg->tv_tpg_mutex);
2157 		return -ENODEV;
2158 	}
2159 
2160 	if (tpg->tv_tpg_port_count != 0) {
2161 		mutex_unlock(&tpg->tv_tpg_mutex);
2162 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2163 			" active TPG port count: %d\n",
2164 			tpg->tv_tpg_port_count);
2165 		return -EBUSY;
2166 	}
2167 
2168 	if (tpg->tv_tpg_vhost_count != 0) {
2169 		mutex_unlock(&tpg->tv_tpg_mutex);
2170 		pr_err("Unable to remove TCM_vhost I_T Nexus with"
2171 			" active TPG vhost count: %d\n",
2172 			tpg->tv_tpg_vhost_count);
2173 		return -EBUSY;
2174 	}
2175 
2176 	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2177 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2178 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2179 
2180 	/*
2181 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2182 	 */
2183 	target_remove_session(se_sess);
2184 	tpg->tpg_nexus = NULL;
2185 	mutex_unlock(&tpg->tv_tpg_mutex);
2186 
2187 	kfree(tv_nexus);
2188 	return 0;
2189 }
2190 
2191 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2192 {
2193 	struct se_portal_group *se_tpg = to_tpg(item);
2194 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2195 				struct vhost_scsi_tpg, se_tpg);
2196 	struct vhost_scsi_nexus *tv_nexus;
2197 	ssize_t ret;
2198 
2199 	mutex_lock(&tpg->tv_tpg_mutex);
2200 	tv_nexus = tpg->tpg_nexus;
2201 	if (!tv_nexus) {
2202 		mutex_unlock(&tpg->tv_tpg_mutex);
2203 		return -ENODEV;
2204 	}
2205 	ret = snprintf(page, PAGE_SIZE, "%s\n",
2206 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2207 	mutex_unlock(&tpg->tv_tpg_mutex);
2208 
2209 	return ret;
2210 }
2211 
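/*
 * Writing the "nexus" attribute creates (or, for the literal string
 * "NULL", destroys) the TPG's I_T nexus.  The initiator WWN prefix
 * must match the protocol of the enclosing target port.  A typical
 * shell invocation (paths and WWNs illustrative):
 *
 *	echo naa.60014055cc849e83 > \
 *	    /sys/kernel/config/target/vhost/naa.500140512345678a/tpgt_1/nexus
 */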
2212 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2213 		const char *page, size_t count)
2214 {
2215 	struct se_portal_group *se_tpg = to_tpg(item);
2216 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2217 				struct vhost_scsi_tpg, se_tpg);
2218 	struct vhost_scsi_tport *tport_wwn = tpg->tport;
2219 	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2220 	int ret;
2221 	/*
2222 	 * Shut down the active I_T nexus if 'NULL' is passed.
2223 	 */
2224 	if (!strncmp(page, "NULL", 4)) {
2225 		ret = vhost_scsi_drop_nexus(tpg);
2226 		return (!ret) ? count : ret;
2227 	}
2228 	/*
2229 	 * Otherwise make sure the passed virtual Initiator port WWN matches
2230 	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2231 	 * vhost_scsi_make_nexus().
2232 	 */
2233 	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2234 		pr_err("Emulated NAA SAS Address: %s, exceeds"
2235 				" max: %d\n", page, VHOST_SCSI_NAMELEN);
2236 		return -EINVAL;
2237 	}
2238 	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2239 
2240 	ptr = strstr(i_port, "naa.");
2241 	if (ptr) {
2242 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2243 			pr_err("Passed SAS Initiator Port %s does not"
2244 				" match target port protoid: %s\n", i_port,
2245 				vhost_scsi_dump_proto_id(tport_wwn));
2246 			return -EINVAL;
2247 		}
2248 		port_ptr = &i_port[0];
2249 		goto check_newline;
2250 	}
2251 	ptr = strstr(i_port, "fc.");
2252 	if (ptr) {
2253 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2254 			pr_err("Passed FCP Initiator Port %s does not"
2255 				" match target port protoid: %s\n", i_port,
2256 				vhost_scsi_dump_proto_id(tport_wwn));
2257 			return -EINVAL;
2258 		}
2259 		port_ptr = &i_port[3]; /* Skip over "fc." */
2260 		goto check_newline;
2261 	}
2262 	ptr = strstr(i_port, "iqn.");
2263 	if (ptr) {
2264 		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2265 			pr_err("Passed iSCSI Initiator Port %s does not"
2266 				" match target port protoid: %s\n", i_port,
2267 				vhost_scsi_dump_proto_id(tport_wwn));
2268 			return -EINVAL;
2269 		}
2270 		port_ptr = &i_port[0];
2271 		goto check_newline;
2272 	}
2273 	pr_err("Unable to locate prefix for emulated Initiator Port:"
2274 			" %s\n", i_port);
2275 	return -EINVAL;
2276 	/*
2277 	 * Clear any trailing newline for the NAA WWN
2278 	 */
2279 check_newline:
2280 	if (i_port[strlen(i_port)-1] == '\n')
2281 		i_port[strlen(i_port)-1] = '\0';
2282 
2283 	ret = vhost_scsi_make_nexus(tpg, port_ptr);
2284 	if (ret < 0)
2285 		return ret;
2286 
2287 	return count;
2288 }
2289 
2290 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2291 
2292 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2293 	&vhost_scsi_tpg_attr_nexus,
2294 	NULL,
2295 };
2296 
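/*
 * configfs mkdir of a tpgt_$TPGT directory beneath a vhost WWPN.  The
 * suffix must parse as a decimal TPG tag below VHOST_SCSI_MAX_TARGET.
 */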
2297 static struct se_portal_group *
2298 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2299 {
2300 	struct vhost_scsi_tport *tport = container_of(wwn,
2301 			struct vhost_scsi_tport, tport_wwn);
2302 
2303 	struct vhost_scsi_tpg *tpg;
2304 	u16 tpgt;
2305 	int ret;
2306 
2307 	if (strstr(name, "tpgt_") != name)
2308 		return ERR_PTR(-EINVAL);
2309 	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2310 		return ERR_PTR(-EINVAL);
2311 
2312 	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2313 	if (!tpg) {
2314 		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2315 		return ERR_PTR(-ENOMEM);
2316 	}
2317 	mutex_init(&tpg->tv_tpg_mutex);
2318 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
2319 	INIT_LIST_HEAD(&tpg->tmf_queue);
2320 	tpg->tport = tport;
2321 	tpg->tport_tpgt = tpgt;
2322 
2323 	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2324 	if (ret < 0) {
2325 		kfree(tpg);
2326 		return ERR_PTR(ret);
2327 	}
2328 	mutex_lock(&vhost_scsi_mutex);
2329 	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2330 	mutex_unlock(&vhost_scsi_mutex);
2331 
2332 	return &tpg->se_tpg;
2333 }
2334 
2335 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2336 {
2337 	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2338 				struct vhost_scsi_tpg, se_tpg);
2339 
2340 	mutex_lock(&vhost_scsi_mutex);
2341 	list_del(&tpg->tv_tpg_list);
2342 	mutex_unlock(&vhost_scsi_mutex);
2343 	/*
2344 	 * Release the virtual I_T Nexus for this vhost TPG
2345 	 */
2346 	vhost_scsi_drop_nexus(tpg);
2347 	/*
2348 	 * Deregister the se_tpg from TCM..
2349 	 * Deregister the se_tpg from TCM.
2350 	core_tpg_deregister(se_tpg);
2351 	kfree(tpg);
2352 }
2353 
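/*
 * configfs mkdir of a target port under the fabric's configfs root.
 * The directory-name prefix selects the emulated transport: "naa." for
 * SAS, "fc." for FCP and "iqn." for iSCSI.
 */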
2354 static struct se_wwn *
2355 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2356 		     struct config_group *group,
2357 		     const char *name)
2358 {
2359 	struct vhost_scsi_tport *tport;
2360 	char *ptr;
2361 	u64 wwpn = 0;
2362 	int off = 0;
2363 
2364 	/* WWPN parsing is currently stubbed out:
2365 	 * if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */
2366 
2367 	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2368 	if (!tport) {
2369 		pr_err("Unable to allocate struct vhost_scsi_tport\n");
2370 		return ERR_PTR(-ENOMEM);
2371 	}
2372 	tport->tport_wwpn = wwpn;
2373 	/*
2374 	 * Determine the emulated Protocol Identifier and Target Port Name
2375 	 * based on the incoming configfs directory name.
2376 	 */
2377 	ptr = strstr(name, "naa.");
2378 	if (ptr) {
2379 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2380 		goto check_len;
2381 	}
2382 	ptr = strstr(name, "fc.");
2383 	if (ptr) {
2384 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2385 		off = 3; /* Skip over "fc." */
2386 		goto check_len;
2387 	}
2388 	ptr = strstr(name, "iqn.");
2389 	if (ptr) {
2390 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2391 		goto check_len;
2392 	}
2393 
2394 	pr_err("Unable to locate prefix for emulated Target Port:"
2395 			" %s\n", name);
2396 	kfree(tport);
2397 	return ERR_PTR(-EINVAL);
2398 
2399 check_len:
2400 	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2401 		pr_err("Emulated %s Address: %s, exceeds"
2402 			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2403 			VHOST_SCSI_NAMELEN);
2404 		kfree(tport);
2405 		return ERR_PTR(-EINVAL);
2406 	}
2407 	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2408 
2409 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2410 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2411 
2412 	return &tport->tport_wwn;
2413 }
2414 
2415 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2416 {
2417 	struct vhost_scsi_tport *tport = container_of(wwn,
2418 				struct vhost_scsi_tport, tport_wwn);
2419 
2420 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2421 		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2422 		tport->tport_name);
2423 
2424 	kfree(tport);
2425 }
2426 
2427 static ssize_t
2428 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2429 {
2430 	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2431 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2432 		utsname()->machine);
2433 }
2434 
2435 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2436 
2437 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2438 	&vhost_scsi_wwn_attr_version,
2439 	NULL,
2440 };
2441 
2442 static const struct target_core_fabric_ops vhost_scsi_ops = {
2443 	.module				= THIS_MODULE,
2444 	.fabric_name			= "vhost",
2445 	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
2446 	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
2447 	.tpg_get_tag			= vhost_scsi_get_tpgt,
2448 	.tpg_check_demo_mode		= vhost_scsi_check_true,
2449 	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
2450 	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2451 	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2452 	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
2453 	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
2454 	.release_cmd			= vhost_scsi_release_cmd,
2455 	.check_stop_free		= vhost_scsi_check_stop_free,
2456 	.sess_get_index			= vhost_scsi_sess_get_index,
2457 	.sess_get_initiator_sid		= NULL,
2458 	.write_pending			= vhost_scsi_write_pending,
2459 	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
2460 	.get_cmd_state			= vhost_scsi_get_cmd_state,
2461 	.queue_data_in			= vhost_scsi_queue_data_in,
2462 	.queue_status			= vhost_scsi_queue_status,
2463 	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
2464 	.aborted_task			= vhost_scsi_aborted_task,
2465 	/*
2466 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2467 	 */
2468 	.fabric_make_wwn		= vhost_scsi_make_tport,
2469 	.fabric_drop_wwn		= vhost_scsi_drop_tport,
2470 	.fabric_make_tpg		= vhost_scsi_make_tpg,
2471 	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
2472 	.fabric_post_link		= vhost_scsi_port_link,
2473 	.fabric_pre_unlink		= vhost_scsi_port_unlink,
2474 
2475 	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
2476 	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
2477 	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
2478 };
2479 
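/*
 * Module init: register the misc device first, then the TCM fabric
 * template, unwinding the former if the latter fails.
 */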
2480 static int __init vhost_scsi_init(void)
2481 {
2482 	int ret = -ENOMEM;
2483 
2484 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2485 		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2486 		utsname()->machine);
2487 
2488 	ret = vhost_scsi_register();
2489 	if (ret < 0)
2490 		goto out;
2491 
2492 	ret = target_register_template(&vhost_scsi_ops);
2493 	if (ret < 0)
2494 		goto out_vhost_scsi_deregister;
2495 
2496 	return 0;
2497 
2498 out_vhost_scsi_deregister:
2499 	vhost_scsi_deregister();
2500 out:
2501 	return ret;
2502 }
2503 
2504 static void vhost_scsi_exit(void)
2505 {
2506 	target_unregister_template(&vhost_scsi_ops);
2507 	vhost_scsi_deregister();
2508 }
2509 
2510 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2511 MODULE_ALIAS("tcm_vhost");
2512 MODULE_LICENSE("GPL");
2513 module_init(vhost_scsi_init);
2514 module_exit(vhost_scsi_exit);
2515