xref: /linux/drivers/vhost/scsi.c (revision c39b9fd728d8173ecda993524089fbc38211a17f)
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.c"
#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct tcm_vhost_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	/* Pointer to response */
	struct virtio_scsi_cmd_resp __user *tvc_resp;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct tcm_vhost_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for SAS Initiator port */
	char iport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

struct vhost_scsi;
struct tcm_vhost_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* list for tcm_vhost_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct tcm_vhost_nexus *tpg_nexus;
	/* Pointer back to tcm_vhost_tport */
	struct tcm_vhost_tport *tport;
	/* Returned by tcm_vhost_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_tport() */
	struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
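
/*
 * Queue 0 is the virtio-scsi control queue and queue 1 the event queue;
 * every virtqueue from VHOST_SCSI_VQ_IO upward is a request queue (see
 * vhost_scsi_open()), so a device with VHOST_SCSI_MAX_VQ virtqueues
 * services VHOST_SCSI_MAX_VQ - 2 I/O queues.
 */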

/*
 * VIRTIO_RING_F_EVENT_IDX seems broken. It is not clear whether the bug
 * is in the kernel, but disabling the feature helps.
 * TODO: debug and remove the workaround.
 *
 * Note that VIRTIO_RING_F_EVENT_IDX is a bit number, so the feature is
 * cleared with ~(1ULL << VIRTIO_RING_F_EVENT_IDX) rather than the bare
 * ~VIRTIO_RING_F_EVENT_IDX, which would not touch the feature bit at all.
 */
enum {
	VHOST_SCSI_FEATURES = (VHOST_FEATURES &
			       ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) |
			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any given time one counter tracks newly submitted commands, while
	 * we wait for the other one to drop to 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex protecting the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

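/*
 * Count the pages an iovec touches, including partial first/last pages.
 * For example, with 4 KiB pages, a 6000-byte iovec whose base sits at
 * offset 0xff0 into a page spans three pages: 16 bytes in the first,
 * 4096 in the second and 1888 in the third.
 */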
static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}
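
/*
 * Every command allocated while a given inflight generation is current
 * holds a reference taken in tcm_vhost_get_inflight() and dropped in
 * vhost_scsi_free_cmd() via tcm_vhost_put_inflight(). The initial
 * reference from kref_init() is dropped by vhost_scsi_flush(), so the
 * completion only fires once the last outstanding command finishes.
 */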

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct vhost_scsi *vs = tv_cmd->tvc_vhost;

	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
	u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}
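
/*
 * Event allocation is bounded: once more than VHOST_SCSI_MAX_EVENT
 * events are pending, new ones are dropped and only recorded in
 * vs_events_missed, which a later delivered event reports to the guest
 * as VIRTIO_SCSI_T_EVENTS_MISSED so it knows to rescan.
 */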

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 1);

	if (tv_cmd->tvc_sgl_count) {
		u32 i;
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));

		kfree(tv_cmd->tvc_sgl);
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);

	kfree(tv_cmd);
}

static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
	struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *tv_cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &tv_cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
			q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(tv_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
	struct vhost_virtqueue *vq,
	struct tcm_vhost_tpg *tv_tpg,
	struct virtio_scsi_cmd_req *v_req,
	u32 exp_data_len,
	int data_direction)
{
	struct tcm_vhost_cmd *tv_cmd;
	struct tcm_vhost_nexus *tv_nexus;

	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}

	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
	if (!tv_cmd) {
		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}
	tv_cmd->tvc_tag = v_req->tag;
	tv_cmd->tvc_task_attr = v_req->task_attr;
	tv_cmd->tvc_exp_data_len = exp_data_len;
	tv_cmd->tvc_data_direction = data_direction;
	tv_cmd->tvc_nexus = tv_nexus;
	tv_cmd->inflight = tcm_vhost_get_inflight(vq);

	return tv_cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
	unsigned int sgl_count, struct iovec *iov, int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	kfree(pages);
	return ret;
}
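
/*
 * Example (assuming 4 KiB pages): an iovec whose base sits at offset
 * 0xff0 within its page with iov_len = 8192 pins three pages and fills
 * three scatterlist entries of 16, 4096 and 4080 bytes; the caller
 * advances its sg cursor by the returned count before mapping the next
 * iovec.
 */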

static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
	struct iovec *iov, unsigned int niov, int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
	sg_init_table(sg, sgl_count);

	tv_cmd->tvc_sgl = sg;
	tv_cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
		if (ret < 0) {
			/*
			 * Only the entries before the current sg cursor were
			 * populated; releasing pages for the whole table
			 * would put_page() on NULL pages.
			 */
			unsigned int mapped = sg - tv_cmd->tvc_sgl;

			for (i = 0; i < mapped; i++)
				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
			kfree(tv_cmd->tvc_sgl);
			tv_cmd->tvc_sgl = NULL;
			tv_cmd->tvc_sgl_count = 0;
			return ret;
		}

		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *tv_cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (tv_cmd->tvc_sgl_count) {
		sg_ptr = tv_cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = tv_cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
			tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
			tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
			0, sg_ptr, tv_cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
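
/*
 * Completion flows back asynchronously: the target core calls
 * tcm_vhost_queue_data_in()/tcm_vhost_queue_status(), which push the
 * command onto vs_completion_list and kick vs_completion_work, and
 * vhost_scsi_complete_cmd_work() then copies the response into the
 * guest buffer saved in tvc_resp.
 */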

static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq, int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_cmd *tv_cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 *
	 * TODO: Check that we are running from vhost_worker which acts
	 * as read-side critical section for vhost kind of RCU.
	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
	 */
	vs_tpg = rcu_dereference_check(vq->private_data, 1);
	if (!vs_tpg)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* Extract the tpgt */
		target = v_req.lun[1];
		tv_tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tv_tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
					exp_data_len, data_direction);
		if (IS_ERR(tv_cmd)) {
			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
					PTR_ERR(tv_cmd));
			goto err_cmd;
		}
		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", tv_cmd, exp_data_len, data_direction);

		tv_cmd->tvc_vhost = vs;
		tv_cmd->tvc_vq = vq;
		tv_cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy the received CDB descriptor into tv_cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(tv_cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_TO_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		tv_cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(tv_cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
	mutex_unlock(&vq->mutex);
}
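
/*
 * Descriptor layout assumed above, per request: iov[0] carries the
 * virtio_scsi_cmd_req, iov[out] the virtio_scsi_cmd_resp, and the
 * remaining iovecs are the data payload; whether the payload sits on
 * the out or the in side of the descriptor chain determines
 * DMA_TO_DEVICE vs DMA_FROM_DEVICE.
 */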

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, u32 event, u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
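
/*
 * The 8-byte event LUN above follows the virtio-scsi single-level LUN
 * format: byte 0 is 0x01, byte 1 the target, and bytes 2-3 carry the
 * LUN with bit 6 of byte 2 set for LUNs >= 256. For example, target 5
 * with LUN 300 encodes as { 0x01, 0x05, 0x41, 0x2c, 0, 0, 0, 0 }.
 */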

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}
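
/*
 * The flush protocol, step by step: swap in a fresh inflight generation
 * so that new requests take references on it, drop the initial
 * kref_init() reference of each old generation, drain the vq polls and
 * pending work items, and finally sleep on each old generation's
 * completion until its refcount reaches zero.
 */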

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int vhost_scsi_set_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tv_tpg->tv_tpg_mutex);
		if (!tv_tpg->tpg_nexus) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		if (tv_tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tv_tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tv_tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tv_tpg->tv_tpg_vhost_count++;
			tv_tpg->vhost_scsi = vs;
			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, vs_tpg);
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_clear_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tv_tpg = vs->vs_tpg[target];
		if (!tv_tpg)
			continue;

		mutex_lock(&tv_tpg->tv_tpg_mutex);
		tv_tport = tv_tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tv_tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tv_tpg->tv_tpg_vhost_count--;
		tv_tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, NULL);
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tv_tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s;
	struct vhost_virtqueue **vqs;
	int r, i;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(s);
		return -ENOMEM;
	}

	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);

	s->vs_events_nr = 0;
	s->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
	s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &s->vqs[i].vq;
		s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(s, NULL);

	if (r < 0) {
		kfree(vqs);
		kfree(s);
		return r;
	}

	f->private_data = s;
	return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&s->dev.mutex);
	memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&s->dev.mutex);
	vhost_scsi_clear_endpoint(s, &t);
	vhost_dev_stop(&s->dev);
	vhost_dev_cleanup(&s->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(s);
	kfree(s->dev.vqs);
	kfree(s);
	return 0;
}

static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
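
/*
 * Typical userspace bring-up, as a sketch (error handling omitted; the
 * WWPN string is a made-up example and must match a configured vhost
 * target port):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target t = {
 *		.vhost_wwpn = "naa.600140554cf3a18e",
 *	};
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	// ...VHOST_SET_MEM_TABLE and VHOST_SET_VRING_* per virtqueue...
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */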

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count++;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count--;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

static struct se_node_acl *tcm_vhost_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	if (tv_tpg->tpg_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("tv_tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tv_tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tv_nexus->tvn_se_sess = transport_init_session();
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		transport_free_session(tv_nexus->tvn_se_sess);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tv_tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tv_tpg->tv_tpg_mutex);
	return 0;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tv_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
1846 
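/*
 * Illustrative userspace flow (a sketch only; the WWN below is made up).
 * Writing an initiator WWN to the 'nexus' attribute created by
 * TF_TPG_BASE_ATTR() below invokes tcm_vhost_tpg_store_nexus(), while
 * writing the literal string "NULL" tears the nexus down again via
 * tcm_vhost_drop_nexus():
 *
 *   echo naa.600140554cf3a18e > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *   echo NULL > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 */
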
1847 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1848 
1849 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1850 	&tcm_vhost_tpg_nexus.attr,
1851 	NULL,
1852 };
1853 
1854 static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1855 	struct config_group *group,
1856 	const char *name)
1857 {
1858 	struct tcm_vhost_tport *tport = container_of(wwn,
1859 			struct tcm_vhost_tport, tport_wwn);
1860 
1861 	struct tcm_vhost_tpg *tpg;
1862 	unsigned long tpgt;
1863 	int ret;
1864 
1865 	if (strstr(name, "tpgt_") != name)
1866 		return ERR_PTR(-EINVAL);
1867 	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1868 		return ERR_PTR(-EINVAL);
1869 
1870 	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1871 	if (!tpg) {
1872 		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
1873 		return ERR_PTR(-ENOMEM);
1874 	}
1875 	mutex_init(&tpg->tv_tpg_mutex);
1876 	INIT_LIST_HEAD(&tpg->tv_tpg_list);
1877 	tpg->tport = tport;
1878 	tpg->tport_tpgt = tpgt;
1879 
1880 	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1881 				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1882 	if (ret < 0) {
1883 		kfree(tpg);
1884 		return ERR_PTR(ret);
1885 	}
1886 	mutex_lock(&tcm_vhost_mutex);
1887 	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1888 	mutex_unlock(&tcm_vhost_mutex);
1889 
1890 	return &tpg->se_tpg;
1891 }
1892 
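/*
 * Sketch of the configfs operation that reaches tcm_vhost_make_tpg()
 * above (path and WWN are illustrative, not taken from a real setup):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *
 * The "tpgt_" prefix and the decimal tag are enforced by the strstr()
 * and kstrtoul() checks at the top of the function.
 */
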
1893 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1894 {
1895 	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1896 				struct tcm_vhost_tpg, se_tpg);
1897 
1898 	mutex_lock(&tcm_vhost_mutex);
1899 	list_del(&tpg->tv_tpg_list);
1900 	mutex_unlock(&tcm_vhost_mutex);
1901 	/*
1902 	 * Release the virtual I_T Nexus for this vhost TPG
1903 	 */
1904 	tcm_vhost_drop_nexus(tpg);
1905 	/*
1906 	 * Deregister the se_tpg from TCM.
1907 	 */
1908 	core_tpg_deregister(se_tpg);
1909 	kfree(tpg);
1910 }
1911 
1912 static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1913 	struct config_group *group,
1914 	const char *name)
1915 {
1916 	struct tcm_vhost_tport *tport;
1917 	char *ptr;
1918 	u64 wwpn = 0;
1919 	int off = 0;
1920 
1921 	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1922 		return ERR_PTR(-EINVAL); */
1923 
1924 	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1925 	if (!tport) {
1926 		pr_err("Unable to allocate struct tcm_vhost_tport\n");
1927 		return ERR_PTR(-ENOMEM);
1928 	}
1929 	tport->tport_wwpn = wwpn;
1930 	/*
1931 	 * Determine the emulated Protocol Identifier and Target Port Name
1932 	 * based on the incoming configfs directory name.
1933 	 */
1934 	ptr = strstr(name, "naa.");
1935 	if (ptr) {
1936 		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1937 		goto check_len;
1938 	}
1939 	ptr = strstr(name, "fc.");
1940 	if (ptr) {
1941 		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1942 		off = 3; /* Skip over "fc." */
1943 		goto check_len;
1944 	}
1945 	ptr = strstr(name, "iqn.");
1946 	if (ptr) {
1947 		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1948 		goto check_len;
1949 	}
1950 
1951 	pr_err("Unable to locate prefix for emulated Target Port:"
1952 			" %s\n", name);
1953 	kfree(tport);
1954 	return ERR_PTR(-EINVAL);
1955 
1956 check_len:
1957 	if (strlen(name) >= TCM_VHOST_NAMELEN) {
1958 		pr_err("Emulated %s Address: %s, exceeds"
1959 			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
1960 			TCM_VHOST_NAMELEN);
1961 		kfree(tport);
1962 		return ERR_PTR(-EINVAL);
1963 	}
1964 	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1965 
1966 	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1967 		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1968 
1969 	return &tport->tport_wwn;
1970 }
1971 
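/*
 * Matching userspace step, again as a hedged example with a made-up WWN:
 * creating a WWN directory under the fabric root invokes
 * tcm_vhost_make_tport(), with tport_proto_id selected by the "naa.",
 * "fc." or "iqn." prefix parsed above:
 *
 *   mkdir -p /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 */
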
1972 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1973 {
1974 	struct tcm_vhost_tport *tport = container_of(wwn,
1975 				struct tcm_vhost_tport, tport_wwn);
1976 
1977 	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1978 		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1979 		tport->tport_name);
1980 
1981 	kfree(tport);
1982 }
1983 
1984 static ssize_t tcm_vhost_wwn_show_attr_version(
1985 	struct target_fabric_configfs *tf,
1986 	char *page)
1987 {
1988 	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1989 		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1990 		utsname()->machine);
1991 }
1992 
1993 TF_WWN_ATTR_RO(tcm_vhost, version);
1994 
1995 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1996 	&tcm_vhost_wwn_version.attr,
1997 	NULL,
1998 };
1999 
2000 static struct target_core_fabric_ops tcm_vhost_ops = {
2001 	.get_fabric_name		= tcm_vhost_get_fabric_name,
2002 	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
2003 	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
2004 	.tpg_get_tag			= tcm_vhost_get_tag,
2005 	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
2006 	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
2007 	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
2008 	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
2009 	.tpg_check_demo_mode		= tcm_vhost_check_true,
2010 	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
2011 	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2012 	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2013 	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
2014 	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
2015 	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
2016 	.release_cmd			= tcm_vhost_release_cmd,
2017 	.shutdown_session		= tcm_vhost_shutdown_session,
2018 	.close_session			= tcm_vhost_close_session,
2019 	.sess_get_index			= tcm_vhost_sess_get_index,
2020 	.sess_get_initiator_sid		= NULL,
2021 	.write_pending			= tcm_vhost_write_pending,
2022 	.write_pending_status		= tcm_vhost_write_pending_status,
2023 	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
2024 	.get_task_tag			= tcm_vhost_get_task_tag,
2025 	.get_cmd_state			= tcm_vhost_get_cmd_state,
2026 	.queue_data_in			= tcm_vhost_queue_data_in,
2027 	.queue_status			= tcm_vhost_queue_status,
2028 	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
2029 	/*
2030 	 * Setup callers for generic logic in target_core_fabric_configfs.c
2031 	 */
2032 	.fabric_make_wwn		= tcm_vhost_make_tport,
2033 	.fabric_drop_wwn		= tcm_vhost_drop_tport,
2034 	.fabric_make_tpg		= tcm_vhost_make_tpg,
2035 	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
2036 	.fabric_post_link		= tcm_vhost_port_link,
2037 	.fabric_pre_unlink		= tcm_vhost_port_unlink,
2038 	.fabric_make_np			= NULL,
2039 	.fabric_drop_np			= NULL,
2040 	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
2041 	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
2042 };
2043 
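/*
 * Note on the table above: tpg_check_demo_mode is wired to
 * tcm_vhost_check_true, so initiators may connect without an explicit
 * NodeACL ("demo mode"), while the demo/prod write-protect checks stay
 * disabled via tcm_vhost_check_false.
 */
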
2044 static int tcm_vhost_register_configfs(void)
2045 {
2046 	struct target_fabric_configfs *fabric;
2047 	int ret;
2048 
2049 	pr_debug("TCM_VHOST fabric module %s on %s/%s"
2050 		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2051 		utsname()->machine);
2052 	/*
2053 	 * Register the top level struct config_item_type with TCM core
2054 	 */
2055 	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2056 	if (IS_ERR(fabric)) {
2057 		pr_err("target_fabric_configfs_init() failed\n");
2058 		return PTR_ERR(fabric);
2059 	}
2060 	/*
2061 	 * Setup fabric->tf_ops from our local tcm_vhost_ops
2062 	 */
2063 	fabric->tf_ops = tcm_vhost_ops;
2064 	/*
2065 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
2066 	 */
2067 	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2068 	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2069 	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2070 	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2071 	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2072 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2073 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2074 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2075 	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2076 	/*
2077 	 * Register the fabric for use within TCM
2078 	 */
2079 	ret = target_fabric_configfs_register(fabric);
2080 	if (ret < 0) {
2081 		pr_err("target_fabric_configfs_register() failed"
2082 				" for TCM_VHOST\n");
		target_fabric_configfs_free(fabric);
2083 		return ret;
2084 	}
2085 	/*
2086 	 * Setup our local pointer to *fabric
2087 	 */
2088 	tcm_vhost_fabric_configfs = fabric;
2089 	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2090 	return 0;
2091 }
2092 
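/*
 * After a successful tcm_vhost_register_configfs() the fabric appears
 * under configfs roughly as follows (assuming the usual mount point of
 * /sys/kernel/config):
 *
 *   /sys/kernel/config/target/vhost/
 *   |-- version              (tcm_vhost_wwn_attrs, read-only)
 *   `-- naa.<wwn>/           (tcm_vhost_make_tport / drop_tport)
 *       `-- tpgt_<tag>/      (tcm_vhost_make_tpg / drop_tpg)
 *           `-- nexus        (tcm_vhost_tpg_attrs, read-write)
 */
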
2093 static void tcm_vhost_deregister_configfs(void)
2094 {
2095 	if (!tcm_vhost_fabric_configfs)
2096 		return;
2097 
2098 	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2099 	tcm_vhost_fabric_configfs = NULL;
2100 	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2101 }
2102 
2103 static int __init tcm_vhost_init(void)
2104 {
2105 	int ret = -ENOMEM;
2106 	/*
2107 	 * Use our own dedicated workqueue for submitting I/O into
2108 	 * target core to avoid contention within system_wq.
2109 	 */
2110 	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2111 	if (!tcm_vhost_workqueue)
2112 		goto out;
2113 
2114 	ret = vhost_scsi_register();
2115 	if (ret < 0)
2116 		goto out_destroy_workqueue;
2117 
2118 	ret = tcm_vhost_register_configfs();
2119 	if (ret < 0)
2120 		goto out_vhost_scsi_deregister;
2121 
2122 	return 0;
2123 
2124 out_vhost_scsi_deregister:
2125 	vhost_scsi_deregister();
2126 out_destroy_workqueue:
2127 	destroy_workqueue(tcm_vhost_workqueue);
2128 out:
2129 	return ret;
2130 }
2131 
2132 static void tcm_vhost_exit(void)
2133 {
2134 	tcm_vhost_deregister_configfs();
2135 	vhost_scsi_deregister();
2136 	destroy_workqueue(tcm_vhost_workqueue);
2137 }
2138 
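/*
 * Rough module lifecycle, per the init/exit pair above (the module is
 * also reachable through the MODULE_ALIAS below):
 *
 *   modprobe tcm_vhost   -> tcm_vhost_init(): allocate the tcm_vhost
 *                           workqueue, register the vhost-scsi device
 *                           via vhost_scsi_register(), then register
 *                           the configfs fabric
 *   rmmod tcm_vhost      -> tcm_vhost_exit(): teardown in reverse order
 */
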
2139 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2140 MODULE_ALIAS("tcm_vhost");
2141 MODULE_LICENSE("GPL");
2142 module_init(tcm_vhost_init);
2143 module_exit(tcm_vhost_exit);
2144