// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <linux/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
	u32 tvc_lun;
	u32 copied_iov:1;
	const void *saved_iter_addr;
	struct iov_iter saved_iter;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec *tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};
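
/*
 * Note: vhost_scsi_cmd descriptors are not allocated per I/O. They are
 * preallocated per virtqueue by vhost_scsi_setup_vq_cmds() and claimed
 * per request via an sbitmap tag in vhost_scsi_get_cmd() below.
 */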

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
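
/*
 * Fixed virtqueue layout, matching the virtio-scsi specification: the
 * control queue is index 0, the event queue is index 1, and the request
 * (I/O) queues start at index 2.
 */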

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
			      (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_IO_VQ	1024
#define VHOST_SCSI_MAX_EVENT	128

static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;

	struct vhost_work completion_work;
	struct llist_head completion_list;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct work_struct flush_work;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}
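
/*
 * Example of the double-buffered inflight scheme with inflight_idx
 * starting at 0: new commands take a reference on inflights[0]; a flush
 * then flips inflight_idx to 1, so later commands reference inflights[1]
 * while vhost_scsi_flush() waits for the inflights[0] refcount to drain
 * to zero.
 */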

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
			if (tv_cmd->copied_iov)
				__free_page(sg_page(&tv_cmd->tvc_sgl[i]));
			else
				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
		}
		kfree(tv_cmd->saved_iter_addr);
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	kfree(tmf);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
{
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;

	llnode = llist_del_all(&svq->completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
		vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		schedule_work(&tmf->flush_work);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
					struct vhost_scsi_virtqueue, vq);

		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
		if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
			vhost_scsi_drop_cmds(svq);
	}
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		if (!drop)
			vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	vhost_scsi_complete_events(vs, false);
}

static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
	struct iov_iter *iter = &cmd->saved_iter;
	struct scatterlist *sg = cmd->tvc_sgl;
	struct page *page;
	size_t len;
	int i;

	for (i = 0; i < cmd->tvc_sgl_count; i++) {
		page = sg_page(&sg[i]);
		len = sg[i].length;

		if (copy_page_to_iter(page, 0, len, iter) != len) {
			pr_err("Could not copy %zu bytes while handling misaligned cmd\n",
			       len);
			return -1;
		}
	}

	return 0;
}
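
/*
 * The helper above is the read-side completion path for bounce-buffered
 * commands: when a misaligned request was served out of pages allocated
 * in vhost_scsi_copy_iov_to_sgl(), the data is copied back to the
 * guest's original iovecs (saved in cmd->saved_iter) on completion.
 */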

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi_virtqueue *svq = container_of(work,
				struct vhost_scsi_virtqueue, completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	bool signal = false;
	int ret;

	llnode = llist_del_all(&svq->completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			 cmd, se_cmd->residual_count, se_cmd->scsi_status);
		memset(&v_rsp, 0, sizeof(v_rsp));

		if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
		} else {
			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
						     se_cmd->residual_count);
			/* TODO is status_qualifier field needed? */
			v_rsp.status = se_cmd->scsi_status;
			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
							 se_cmd->scsi_sense_length);
			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
			       se_cmd->scsi_sense_length);
		}

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			signal = true;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	if (signal)
		vhost_signal(&svq->vs->dev, &svq->vq);
}

static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct iovec *tvc_resp_iov;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	tvc_resp_iov = cmd->tvc_resp_iov;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);
	cmd->tvc_resp_iov = tvc_resp_iov;

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}
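
/*
 * A tag claimed by sbitmap_get() above stays owned by the command until
 * vhost_scsi_release_cmd_res() returns it via sbitmap_clear_bit(), so a
 * per-tag descriptor and its preallocated sgl/page/iovec arrays are
 * never handed out twice. Note the pointers are saved around the
 * memset() so zeroing the descriptor does not leak the preallocations.
 */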

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool is_prot)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes, mapped_bytes;
	size_t offset, mapped_offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	mapped_bytes = bytes;
	mapped_offset = offset;

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		/*
		 * The block layer requires bios/requests to be a multiple of
		 * 512 bytes, but Windows can send us vecs that are misaligned.
		 * This can result in bios and later requests with misaligned
		 * sizes if we have to break up a cmd/scatterlist into multiple
		 * bios.
		 *
		 * We currently only break up a command into multiple bios if
		 * we hit the vec/seg limit, so check if our sgl_count is
		 * greater than the max and if a vec in the cmd has a
		 * misaligned offset/size.
		 */
		if (!is_prot &&
		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
			WARN_ONCE(true,
				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
			goto revert_iter_get_pages;
		}

		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}

	return npages;

revert_iter_get_pages:
	iov_iter_revert(iter, mapped_bytes);

	npages = 0;
	while (mapped_bytes) {
		unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
				       mapped_bytes);

		put_page(pages[npages++]);

		mapped_bytes -= n;
		mapped_offset = 0;
	}

	return -EINVAL;
}
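
/*
 * Worked example for the mapping loop above, assuming 4 KiB pages: a
 * 5120-byte buffer that starts at offset 512 into its first page pins
 * two pages and produces two scatterlist entries, 3584 bytes at offset
 * 512 followed by 1536 bytes at offset 0.
 */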

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter_iov(iter)) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			   struct scatterlist *sg, int sg_count)
{
	size_t len = iov_iter_count(iter);
	unsigned int nbytes = 0;
	struct page *page;
	int i;

	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
						GFP_KERNEL);
		if (!cmd->saved_iter_addr)
			return -ENOMEM;
	}

	for (i = 0; i < sg_count; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			i--;
			goto err;
		}

		nbytes = min_t(unsigned int, PAGE_SIZE, len);
		sg_set_page(&sg[i], page, nbytes, 0);

		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
			goto err;

		len -= nbytes;
	}

	cmd->copied_iov = 1;
	return 0;

err:
	pr_err("Could not read %u bytes while handling misaligned cmd\n",
	       nbytes);

	for (; i >= 0; i--)
		__free_page(sg_page(&sg[i]));
	kfree(cmd->saved_iter_addr);
	return -ENOMEM;
}
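
/*
 * This is the bounce-buffer fallback for misaligned commands: writes are
 * copied out of the guest iovecs into the freshly allocated pages right
 * here, while reads only save the iter so vhost_scsi_copy_sgl_to_iov()
 * can copy the payload back to the guest at completion time.
 */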

static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			  struct scatterlist *sg, int sg_count, bool is_prot)
{
	struct scatterlist *p = sg;
	size_t revert_bytes;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
		if (ret < 0) {
			revert_bytes = 0;

			while (p < sg) {
				struct page *page = sg_page(p);

				if (page) {
					put_page(page);
					revert_bytes += p->length;
				}
				p++;
			}

			iov_iter_revert(iter, revert_bytes);
			return ret;
		}
		sg += ret;
	}

	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
						cmd->tvc_prot_sgl,
						cmd->tvc_prot_sgl_count, true);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
					cmd->tvc_sgl_count, false);
	if (ret == -EINVAL) {
		sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
						 cmd->tvc_sgl_count);
	}

	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}
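
/*
 * Mapping strategy used above: try the zero-copy path first, pinning the
 * guest pages directly via vhost_scsi_map_iov_to_sgl(); only on -EINVAL,
 * i.e. the misaligned-vec case, fall back to copying the payload through
 * bounce pages with vhost_scsi_copy_iov_to_sgl().
 */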

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new? Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;

		if (vc->target) {
			/* validated at handler entry */
			vs_tpg = vhost_vq_get_backend(vq);
			tpg = READ_ONCE(vs_tpg[*vc->target]);
			if (unlikely(!tpg)) {
				vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
				goto out;
			}
		}

		if (tpgp)
			*tpgp = tpg;
		ret = 0;
	}
out:
	return ret;
}

static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}
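
/*
 * Example: for a guest LUN field of { 1, 0, 0x40, 0x05 } (byte 0 must be
 * 1 per the virtio-scsi spec and byte 1 selects the target), the helper
 * above masks off the 0x40 addressing bits and yields LUN 5.
 */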

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, i, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is set up by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		for (i = 0; i < vc.in; i++)
			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(cmd);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

static void vhost_scsi_tmf_flush_work(struct work_struct *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  flush_work);
	struct vhost_virtqueue *vq = &tmf->svq->vq;
	/*
	 * Make sure we have sent responses for other commands before we
	 * send our response.
	 */
	vhost_dev_flush(vq->dev);
	if (!vhost_vq_work_queue(vq, &tmf->vwork))
		vhost_scsi_release_tmf_res(tmf);
}

static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		goto send_reject;

	INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is set up by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}

static void
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
		    u32 event, u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
		vhost_scsi_complete_events(vs, true);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
				    0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}
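
/*
 * Flush sequence in short: swap in fresh inflight counters, drop the
 * initial kref on the old ones so they can reach zero, flush the vhost
 * work queues, then sleep on each old counter's completion until every
 * request issued before the flush has finished.
 */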

static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
		kfree(tv_cmd->tvc_resp_iov);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}

static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
					       sizeof(struct iovec),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_resp_iov) {
			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *   vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	mutex_lock(&vhost_scsi_mutex);
	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&vhost_scsi_mutex);

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			mutex_lock(&tpg->tv_tpg_mutex);
			tpg->vhost_scsi = NULL;
			tpg->tv_tpg_vhost_count--;
			mutex_unlock(&tpg->tv_tpg_mutex);
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_dev;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_dev;
		}
		match = true;
	}
	if (!match)
		goto free_vs_tpg;

	/* Prevent new cmds from starting and accessing the tpgs/sessions */
	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
	/* Make sure cmds are not running before tearing them down. */
	vhost_scsi_flush(vs);

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		vhost_scsi_destroy_vq_cmds(vq);
	}

	/*
	 * We can now release our hold on the tpg and sessions and userspace
	 * can free them after this point.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);

		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;

		mutex_unlock(&tpg->tv_tpg_mutex);

		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}

free_vs_tpg:
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	return 0;

err_dev:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}
1915
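/*
 * Apply the feature bits negotiated by userspace. Bits outside
 * VHOST_SCSI_FEATURES are rejected, and VHOST_F_LOG_ALL additionally
 * requires a usable log base.
 */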
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

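/*
 * Allocate and initialize a vhost_scsi device on open: a control and an
 * event virtqueue plus vhost_scsi_max_io_vqs I/O virtqueues (clamped to
 * the [1, VHOST_SCSI_MAX_IO_VQ] range), each I/O vq with its own
 * completion list and work item.
 */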
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi_virtqueue *svq;
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;

	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		goto err_vs;

	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
		       VHOST_SCSI_MAX_IO_VQ);
		nvqs = VHOST_SCSI_MAX_IO_VQ;
	} else if (nvqs == 0) {
		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
		nvqs = 1;
	}
	nvqs += VHOST_SCSI_VQ_IO;

	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
					 GFP_KERNEL | __GFP_ZERO);
	if (!vs->old_inflight)
		goto err_inflight;

	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
				GFP_KERNEL | __GFP_ZERO);
	if (!vs->vqs)
		goto err_vqs;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_local_vqs;

	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
		svq = &vs->vqs[i];

		vqs[i] = &svq->vq;
		svq->vs = vs;
		init_llist_head(&svq->completion_list);
		vhost_work_init(&svq->completion_work,
				vhost_scsi_complete_cmd_work);
		svq->vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_local_vqs:
	kfree(vs->vqs);
err_vqs:
	kfree(vs->old_inflight);
err_inflight:
	kvfree(vs);
err_vs:
	return r;
}

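/*
 * Final release: clear the endpoint the device was bound to, stop the
 * vhost device, and free everything allocated in vhost_scsi_open().
 */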
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	kfree(vs->dev.vqs);
	kfree(vs->vqs);
	kfree(vs->old_inflight);
	kvfree(vs);
	return 0;
}

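/*
 * Handle the vhost-scsi specific ioctls (endpoint setup/teardown, ABI
 * version, missed events, features, worker management) and fall back to
 * the generic vhost device and vring ioctls for anything else.
 */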
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof(backend)))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof(backend)))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof(abi_version)))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	case VHOST_NEW_WORKER:
	case VHOST_FREE_WORKER:
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		mutex_lock(&vs->dev.mutex);
		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_scsi_fops = {
	.owner = THIS_MODULE,
	.release = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = vhost_scsi_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

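/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event so the guest rescans
 * (plug) or removes (unplug) the LUN. The event is not queued if the
 * backend has been cleared or the guest did not negotiate
 * VIRTIO_SCSI_F_HOTPLUG.
 */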
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	/*
	 * We can't queue events if the backend has been cleared, because
	 * we could end up queueing an event after the flush.
	 */
	if (!vhost_vq_get_backend(vq))
		goto unlock;

	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, vq, tpg, lun,
				    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
unlock:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

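/*
 * TCM port link/unlink callbacks: adjust the TPG port count and signal
 * the guest with a hotplug/hotunplug event for the affected LUN.
 */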
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	vhost_scsi_hotplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	vhost_scsi_hotunplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

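/*
 * Create the I_T nexus for a TPG; each TPG supports a single nexus, so
 * this fails with -EEXIST if one is already configured.
 */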
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

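/*
 * Remove the TPG's I_T nexus. Refused with -EBUSY while the TPG still
 * has active ports or vhost references.
 */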
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG vhost count: %d\n",
		       tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = sysfs_emit(page, "%s\n",
			 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

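/*
 * configfs store handler for the TPG "nexus" attribute: writing "NULL"
 * drops the current nexus, otherwise the value must be an initiator
 * port name whose naa./fc./iqn. prefix matches the tport's protocol.
 */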
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
		       " max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
	       " %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};

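/*
 * configfs callback for creating a "tpgt_<n>" group under a target
 * port: registers the TPG with TCM and adds it to the global
 * vhost_scsi_list.
 */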
static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);
	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}

static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

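/*
 * configfs callback for creating a target port (WWN group). The group
 * name's prefix (naa./fc./iqn.) selects the emulated SCSI protocol
 * identifier stored in the tport.
 */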
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport\n");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
	       " %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
		       " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
		       VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		 tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
			  " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION,
			  utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module = THIS_MODULE,
	.fabric_name = "vhost",
	.max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
	.tpg_get_tag = vhost_scsi_get_tpgt,
	.tpg_check_demo_mode = vhost_scsi_check_true,
	.tpg_check_demo_mode_cache = vhost_scsi_check_true,
	.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
	.release_cmd = vhost_scsi_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.sess_get_initiator_sid = NULL,
	.write_pending = vhost_scsi_write_pending,
	.queue_data_in = vhost_scsi_queue_data_in,
	.queue_status = vhost_scsi_queue_status,
	.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
	.aborted_task = vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = vhost_scsi_make_tport,
	.fabric_drop_wwn = vhost_scsi_drop_tport,
	.fabric_make_tpg = vhost_scsi_make_tpg,
	.fabric_drop_tpg = vhost_scsi_drop_tpg,
	.fabric_post_link = vhost_scsi_port_link,
	.fabric_pre_unlink = vhost_scsi_port_unlink,

	.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,

	.default_submit_type = TARGET_QUEUE_SUBMIT,
	.direct_submit_supp = 1,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		 utsname()->machine);

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);