// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <linux/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
/*
 * For the legacy descriptor case we allocate an iov per byte in the
 * virtio_scsi_cmd_resp struct.
 */
#define VHOST_SCSI_MAX_RESP_IOVS	sizeof(struct virtio_scsi_cmd_resp)

static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;

#ifdef CONFIG_ARCH_NO_SG_CHAIN
static int vhost_scsi_set_inline_sg_cnt(const char *buf,
					const struct kernel_param *kp)
{
	pr_err("Setting inline_sg_cnt is not supported.\n");
	return -EOPNOTSUPP;
}
#else
static int vhost_scsi_set_inline_sg_cnt(const char *buf,
					const struct kernel_param *kp)
{
	unsigned int cnt;
	int ret;

	ret = kstrtouint(buf, 10, &cnt);
	if (ret)
		return ret;

	if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
		pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
		return -EINVAL;
	}

	vhost_scsi_inline_sg_cnt = cnt;
	return 0;
}
#endif

static int vhost_scsi_get_inline_sg_cnt(char *buf,
					const struct kernel_param *kp)
{
	return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
}

static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
	.get = vhost_scsi_get_inline_sg_cnt,
	.set = vhost_scsi_set_inline_sg_cnt,
};

module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
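/*
 * Usage sketch (assuming the module loads under its usual vhost_scsi name):
 * the 0644 permission makes the parameter writable at runtime, e.g.
 *   echo 1024 > /sys/module/vhost_scsi/parameters/inline_sg_cnt
 * or at load time via "modprobe vhost_scsi inline_sg_cnt=1024". Values
 * above VHOST_SCSI_PREALLOC_SGLS are rejected by the setter above.
 */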

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256
103
104 struct vhost_scsi_inflight {
105 /* Wait for the flush operation to finish */
106 struct completion comp;
107 /* Refcount for the inflight reqs */
108 struct kref kref;
109 };
110
111 struct vhost_scsi_cmd {
112 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
113 int tvc_vq_desc;
114 /* The number of scatterlists associated with this cmd */
115 u32 tvc_sgl_count;
116 u32 tvc_prot_sgl_count;
117 u32 copied_iov:1;
118 const void *read_iov;
119 struct iov_iter *read_iter;
120 struct scatterlist *sgl;
121 struct sg_table table;
122 struct scatterlist *prot_sgl;
123 struct sg_table prot_table;
124 /* Fast path response header iovec used when only one vec is needed */
125 struct iovec tvc_resp_iov;
126 /* Number of iovs for response */
127 unsigned int tvc_resp_iovs_cnt;
128 /* Pointer to response header iovecs if more than one is needed */
129 struct iovec *tvc_resp_iovs;
130 /* Pointer to vhost_virtqueue for the cmd */
131 struct vhost_virtqueue *tvc_vq;
132 /* The TCM I/O descriptor that is accessed via container_of() */
133 struct se_cmd tvc_se_cmd;
134 /* Sense buffer that will be mapped into outgoing status */
135 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
136 /*
137 * Dirty write descriptors of this command.
138 */
139 struct vhost_log *tvc_log;
140 unsigned int tvc_log_num;
141 /* Completed commands list, serviced from vhost worker thread */
142 struct llist_node tvc_completion_list;
143 /* Used to track inflight cmd */
144 struct vhost_scsi_inflight *inflight;
145 };
146
147 struct vhost_scsi_nexus {
148 /* Pointer to TCM session for I_T Nexus */
149 struct se_session *tvn_se_sess;
150 };
151
152 struct vhost_scsi_tpg {
153 /* Vhost port target portal group tag for TCM */
154 u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
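/*
 * Per the virtio-scsi queue layout, the control queue is index 0, the
 * event queue index 1, and request (I/O) queues start at index 2, so
 * VHOST_SCSI_VQ_IO doubles as the offset of the first I/O virtqueue.
 */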

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET 256
#define VHOST_SCSI_MAX_IO_VQ 1024
#define VHOST_SCSI_MAX_EVENT 128

static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
	struct page **upages;

	struct vhost_work completion_work;
	struct llist_head completion_list;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */

	unsigned int inline_sg_cnt;
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct work_struct flush_work;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;

	/*
	 * Dirty write descriptors of this command.
	 */
	struct vhost_log *tmf_log;
	unsigned int tmf_log_num;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};
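/*
 * In the ctx above, "out" counts the driver-readable (guest-to-host)
 * iovecs and "in" the driver-writable (host-to-guest) iovecs returned by
 * vhost_get_vq_desc(); out_size/in_size are their total byte lengths,
 * which the handlers compare against req_size/rsp_size.
 */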

/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

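/*
 * Flip each virtqueue to a fresh inflight counter and (optionally) hand
 * the old one back to the caller. A flush then works as sketched in
 * vhost_scsi_flush(): drop the initial kref on each old inflight and
 * wait on its completion, which fires once every request started against
 * it has been released.
 */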
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
				   struct vhost_scsi_cmd *cmd,
				   struct vhost_log *log,
				   unsigned int log_num)
{
	if (!cmd->tvc_log)
		cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
					     sizeof(*cmd->tvc_log),
					     GFP_KERNEL);

	if (unlikely(!cmd->tvc_log)) {
		vq_err(vq, "Failed to alloc tvc_log\n");
		return -ENOMEM;
	}

	memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
	cmd->tvc_log_num = log_num;

	return 0;
}

static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
				 struct vhost_log *log,
				 unsigned int log_num)
{
	if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
		return;

	if (likely(!log_num || !log))
		return;

	/*
	 * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
	 * No requirement for vq->iotlb case.
	 */
	WARN_ON_ONCE(unlikely(vq->iotlb));
	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi *vs = svq->vs;
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for_each_sgtable_sg(&tv_cmd->table, sg, i) {
			page = sg_page(sg);
			if (!page)
				continue;

			if (tv_cmd->copied_iov)
				__free_page(page);
			else
				put_page(page);
		}
		kfree(tv_cmd->read_iter);
		kfree(tv_cmd->read_iov);
		sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
			page = sg_page(sg);
			if (page)
				put_page(page);
		}
		sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
	}

	if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
		kfree(tv_cmd->tvc_resp_iovs);
	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	/*
	 * tmf->tmf_log is NULL by default; it is only allocated when
	 * VHOST_F_LOG_ALL is set.
	 */
	kfree(tmf->tmf_log);
	kfree(tmf);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
{
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;

	llnode = llist_del_all(&svq->completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
		vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
				struct vhost_scsi_tmf, se_cmd);

		schedule_work(&tmf->flush_work);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
					struct vhost_scsi_virtqueue, vq);

		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
		if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
			vhost_scsi_drop_cmds(svq);
	}
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

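/*
 * vhost_scsi_free_evt() above and vhost_scsi_allocate_evt() below update
 * vs_events_nr and vs_events_missed which, per the struct vhost_scsi
 * comments, are protected by the event vq's mutex; callers hold it.
 */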
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	struct vhost_log *vq_log;
	unsigned int log_num;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	/*
	 * Reset 'log_num' since vhost_get_vq_desc() may reset it only
	 * after certain condition checks.
	 */
	log_num = 0;

	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			vq_log, &log_num);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");

	vhost_scsi_log_write(vq, vq_log, log_num);
}

static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		if (!drop)
			vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	vhost_scsi_complete_events(vs, false);
}

static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
	struct iov_iter *iter = cmd->read_iter;
	struct scatterlist *sg;
	struct page *page;
	size_t len;
	int i;

	for_each_sgtable_sg(&cmd->table, sg, i) {
		page = sg_page(sg);
		if (!page)
			continue;

		len = sg->length;

		if (copy_page_to_iter(page, 0, len, iter) != len) {
			pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
			       len);
			return -1;
		}
	}

	return 0;
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi_virtqueue *svq = container_of(work,
				struct vhost_scsi_virtqueue, completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	bool signal = false;
	int ret;

	llnode = llist_del_all(&svq->completion_list);

	mutex_lock(&svq->vq.mutex);

	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			 cmd, se_cmd->residual_count, se_cmd->scsi_status);
		memset(&v_rsp, 0, sizeof(v_rsp));

		if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
		} else {
			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
						     se_cmd->residual_count);
			/* TODO is status_qualifier field needed? */
			v_rsp.status = se_cmd->scsi_status;
			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
							 se_cmd->scsi_sense_length);
			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
			       se_cmd->scsi_sense_length);
		}

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
			      cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			signal = true;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
				     cmd->tvc_log_num);

		vhost_scsi_release_cmd_res(se_cmd);
	}

	mutex_unlock(&svq->vq.mutex);

	if (signal)
		vhost_signal(&svq->vs->dev, &svq->vq);
}

static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct scatterlist *sgl, *prot_sgl;
	struct vhost_log *log;
	int tag;

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sgl = cmd->sgl;
	prot_sgl = cmd->prot_sgl;
	log = cmd->tvc_log;
	memset(cmd, 0, sizeof(*cmd));
	cmd->sgl = sgl;
	cmd->prot_sgl = prot_sgl;
	cmd->tvc_log = log;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	return cmd;
}

static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
					     struct scatterlist *curr,
					     struct scatterlist *end)
{
	size_t revert_bytes = 0;
	struct page *page;

	while (curr != end) {
		page = sg_page(curr);

		if (page) {
			put_page(page);
			revert_bytes += curr->length;
		}
		/* Clear so we can re-use it for the copy path */
		sg_set_page(curr, NULL, 0, 0);
		curr = sg_next(curr);
	}
	iov_iter_revert(iter, revert_bytes);
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct sg_table *sg_table,
		      struct scatterlist **sgl,
		      bool is_prot)
{
	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct page **pages = svq->upages;
	struct scatterlist *sg = *sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int n, npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				    VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	while (bytes) {
		n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
		/*
		 * The block layer requires bios/requests to be a multiple of
		 * 512 bytes, but Windows can send us vecs that are misaligned.
		 * This can result in bios and later requests with misaligned
		 * sizes if we have to break up a cmd/scatterlist into multiple
		 * bios.
		 *
		 * We currently only break up a command into multiple bios if
		 * we hit the vec/seg limit, so check if our sgl_count is
		 * greater than the max and if a vec in the cmd has a
		 * misaligned offset/size.
		 */
		if (!is_prot &&
		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
			WARN_ONCE(true,
				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
			goto revert_iter_get_pages;
		}

		sg_set_page(sg, pages[npages++], n, offset);
		sg = sg_next(sg);
		bytes -= n;
		offset = 0;
	}

	*sgl = sg;
	return npages;

revert_iter_get_pages:
	vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);

	iov_iter_revert(iter, bytes);
	while (bytes) {
		n = min_t(unsigned int, PAGE_SIZE, bytes);

		put_page(pages[npages++]);
		bytes -= n;
	}

	return -EINVAL;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter_iov(iter)) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

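/*
 * Bounce-buffer fallback used when vhost_scsi_map_to_sgl() rejects a
 * misaligned request with -EINVAL: fresh pages are allocated and, for
 * writes, filled from the guest iovecs now; for reads the iter is
 * duplicated so vhost_scsi_copy_sgl_to_iov() can copy the data back to
 * the guest at completion time.
 */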
static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			   struct sg_table *sg_table, int sg_count,
			   int data_dir)
{
	size_t len = iov_iter_count(iter);
	unsigned int nbytes = 0;
	struct scatterlist *sg;
	struct page *page;
	int i, ret;

	if (data_dir == DMA_FROM_DEVICE) {
		cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
		if (!cmd->read_iter)
			return -ENOMEM;

		cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
		if (!cmd->read_iov) {
			ret = -ENOMEM;
			goto free_iter;
		}
	}

	for_each_sgtable_sg(sg_table, sg, i) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			goto err;
		}

		nbytes = min_t(unsigned int, PAGE_SIZE, len);
		sg_set_page(sg, page, nbytes, 0);

		if (data_dir == DMA_TO_DEVICE &&
		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
			ret = -EFAULT;
			goto err;
		}

		len -= nbytes;
	}

	cmd->copied_iov = 1;
	return 0;

err:
	pr_err("Could not read %u bytes while handling misaligned cmd\n",
	       nbytes);

	for_each_sgtable_sg(sg_table, sg, i) {
		page = sg_page(sg);
		if (page)
			__free_page(page);
	}
	kfree(cmd->read_iov);
free_iter:
	kfree(cmd->read_iter);
	return ret;
}

static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			  struct sg_table *sg_table, int sg_count, bool is_prot)
{
	struct scatterlist *sg = sg_table->sgl;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
		if (ret < 0) {
			vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
							 sg);
			return ret;
		}
	}

	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter, int data_dir)
{
	int sgl_count, ret;

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		cmd->prot_table.sgl = cmd->prot_sgl;
		ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
					     cmd->prot_table.sgl,
					     vs->inline_sg_cnt);
		if (ret)
			return ret;

		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
						&cmd->prot_table,
						cmd->tvc_prot_sgl_count, true);
		if (ret < 0) {
			sg_free_table_chained(&cmd->prot_table,
					      vs->inline_sg_cnt);
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	cmd->table.sgl = cmd->sgl;
	ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
				     vs->inline_sg_cnt);
	if (ret)
		return ret;

	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->table.sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
					cmd->tvc_sgl_count, false);
	if (ret == -EINVAL)
		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
						 cmd->tvc_sgl_count, data_dir);
	if (ret < 0) {
		sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
					struct vhost_scsi_cmd *cmd,
					unsigned char *cdb, u16 lun,
					int task_attr, int data_dir,
					u32 exp_data_len)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->table.sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->prot_table.sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
			data_dir, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static void
vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		       struct vhost_scsi_ctx *vc, u8 status)
{
	struct virtio_scsi_cmd_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.status = status;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
		      sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);

	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

#define TYPE_IO_CMD	0
#define TYPE_CTRL_TMF	1
#define TYPE_CTRL_AN	2

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   struct vhost_scsi_ctx *vc, int type)
{
	union {
		struct virtio_scsi_cmd_resp cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp an;
	} rsp;
	struct iov_iter iov_iter;
	size_t rsp_size;
	int ret;

	memset(&rsp, 0, sizeof(rsp));

	if (type == TYPE_IO_CMD) {
		rsp_size = sizeof(struct virtio_scsi_cmd_resp);
		rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
	} else if (type == TYPE_CTRL_TMF) {
		rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
		rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
	} else {
		rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
		rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
	}

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
		      rsp_size);

	ret = copy_to_iter(&rsp, rsp_size, &iov_iter);

	if (likely(ret == rsp_size))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio scsi type=%d\n", type);
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc,
		    struct vhost_log *log, unsigned int *log_num)
{
	int ret = -ENXIO;

	if (likely(log_num))
		*log_num = 0;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     log, log_num);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new? Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

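/*
 * Sanity-check buffer sizes before use. The distinct error codes matter
 * to the callers' err paths: -EINVAL (response buffer too small) drops
 * the request since no response can be written safely, while -EIO
 * (request buffer too small) still reports VIRTIO_SCSI_S_BAD_TARGET back
 * to the guest.
 */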
static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;

		if (vc->target) {
			/* validated at handler entry */
			vs_tpg = vhost_vq_get_backend(vq);
			tpg = READ_ONCE(vs_tpg[*vc->target]);
			if (unlikely(!tpg)) {
				vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
				goto out;
			}
		}

		if (tpgp)
			*tpgp = tpg;
		ret = 0;
	}
out:
	return ret;
}

static int
vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
			   unsigned int in_iovs_cnt)
{
	int i, cnt;

	if (!in_iovs_cnt)
		return 0;
	/*
	 * Initiators normally just put the virtio_scsi_cmd_resp in the first
	 * iov, but just in case they wedged in some data with it we check for
	 * greater than or equal to the response struct.
	 */
	if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
		cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
		cmd->tvc_resp_iovs_cnt = 1;
	} else {
		/*
		 * Legacy descriptor layouts didn't specify that we must put
		 * the entire response in one iov. Worst case we have an
		 * iov per byte.
		 */
		cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
		cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
					     GFP_KERNEL);
		if (!cmd->tvc_resp_iovs)
			return -ENOMEM;

		cmd->tvc_resp_iovs_cnt = cnt;
	}

	for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
		cmd->tvc_resp_iovs[i] = in_iovs[i];

	return 0;
}

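/*
 * Decode the 8-byte virtio-scsi LUN field: byte 0 is always 1, byte 1 is
 * the target, and bytes 2-3 carry the LUN in SAM flat addressing. The
 * 0x3FFF mask strips the 0x40 address-method bits, leaving the 14-bit LUN.
 */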
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_nexus *nexus;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	u8 *cdb;
	struct vhost_log *vq_log;
	unsigned int log_num;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}

		nexus = tpg->tpg_nexus;
		if (!nexus) {
			vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
			ret = -EIO;
			goto err;
		}

		cmd = vhost_scsi_get_cmd(vq, tag);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			vq_err(vq, "vhost_scsi_get_cmd failed %d\n", ret);
			goto err;
		}
		cmd->tvc_vq = vq;

		ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
		if (ret) {
			vq_err(vq, "Failed to alloc recv iovs\n");
			vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
			goto err;
		}

		if (unlikely(vq_log && log_num)) {
			ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
			if (unlikely(ret)) {
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cdb[0], lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
					       exp_data_len, &data_iter,
					       data_direction);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
					    data_direction,
					    exp_data_len + prot_bytes);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 * ENOMEM: Could not allocate resources for request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO) {
			vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
			vhost_scsi_log_write(vq, vq_log, log_num);
		} else if (ret == -ENOMEM) {
			vhost_scsi_send_status(vs, vq, &vc,
					       SAM_STAT_TASK_SET_FULL);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	mutex_lock(&tmf->svq->vq.mutex);
	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
			     tmf->tmf_log_num);
	mutex_unlock(&tmf->svq->vq.mutex);

	vhost_scsi_release_tmf_res(tmf);
}

static void vhost_scsi_tmf_flush_work(struct work_struct *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  flush_work);
	struct vhost_virtqueue *vq = &tmf->svq->vq;
	/*
	 * Make sure we have sent responses for other commands before we
	 * send our response.
	 */
	vhost_dev_flush(vq->dev);
	if (!vhost_vq_work_queue(vq, &tmf->vwork))
		vhost_scsi_release_tmf_res(tmf);
}

static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc,
		      struct vhost_log *log, unsigned int log_num)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		goto send_reject;

	INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (unlikely(log && log_num)) {
		tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
					     GFP_KERNEL);
		if (tmf->tmf_log) {
			memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
			tmf->tmf_log_num = log_num;
		} else {
			pr_err("vhost_scsi tmf log allocation error\n");
			vhost_scsi_release_tmf_res(tmf);
			goto send_reject;
		}
	}

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
	vhost_scsi_log_write(vq, log, log_num);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;
	struct vhost_log *vq_log;
	unsigned int log_num;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
					      vq_log, log_num);
		else {
			vhost_scsi_send_an_resp(vs, vq, &vc);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO) {
			vhost_scsi_send_bad_target(vs, vq, &vc,
						   v_req.type == VIRTIO_SCSI_T_TMF ?
						   TYPE_CTRL_TMF :
						   TYPE_CTRL_AN);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}

static void
vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_tpg *tpg, struct se_lun *lun,
		    u32 event, u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
1820 if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
		vhost_scsi_complete_events(vs, true);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
				    0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}

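/*
 * Free the per-command dirty-log buffers for a vq. This runs when the
 * VHOST_F_LOG_ALL feature is withdrawn in vhost_scsi_set_features()
 * and as part of full vq teardown.
 */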
static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];
		kfree(tv_cmd->tvc_log);
		tv_cmd->tvc_log = NULL;
		tv_cmd->tvc_log_num = 0;
	}
}

static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->sgl);
		kfree(tv_cmd->prot_sgl);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->upages);
	vhost_scsi_destroy_vq_log(vq);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}

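/*
 * Pre-allocate the per-vq command pool: an sbitmap for command tags,
 * the page array used when mapping guest buffers, and, if
 * inline_sg_cnt is non-zero, inline scatterlists for each command.
 * Protection scatterlists are only allocated when the
 * VIRTIO_SCSI_F_T10_PI feature has been negotiated.
 */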
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi *vs = svq->vs;
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
			      GFP_KERNEL);
	if (!svq->upages)
		goto out;

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		if (vs->inline_sg_cnt) {
			tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
					      sizeof(struct scatterlist),
					      GFP_KERNEL);
			if (!tv_cmd->sgl) {
				pr_err("Unable to allocate tv_cmd->sgl\n");
				goto out;
			}
		}

		if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
		    vs->inline_sg_cnt) {
			tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
						   sizeof(struct scatterlist),
						   GFP_KERNEL);
			if (!tv_cmd->prot_sgl) {
				pr_err("Unable to allocate tv_cmd->prot_sgl\n");
				goto out;
			}
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	if (vs->vs_tpg) {
		pr_err("vhost-scsi endpoint already set for %s.\n",
		       vs->vs_vhost_wwpn);
		ret = -EEXIST;
		goto out;
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&vhost_scsi_mutex);
	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&vhost_scsi_mutex);

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -ENODEV;
		goto free_tpg;
	}

	/*
	 * Act as synchronize_rcu to make sure requests after this point
	 * see a fully setup device.
	 */
	vhost_scsi_flush(vs);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			mutex_lock(&tpg->tv_tpg_mutex);
			tpg->vhost_scsi = NULL;
			tpg->tv_tpg_vhost_count--;
			mutex_unlock(&tpg->tv_tpg_mutex);
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
free_tpg:
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}

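/*
 * Tear down the endpoint in the reverse order of setup: clear each vq
 * backend so no new commands can start, flush so in-flight commands
 * finish, free the per-vq command pools, and finally drop the configfs
 * dependency taken on each TPG by vhost_scsi_set_endpoint().
 */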
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_dev;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_dev;
		}
		match = true;
	}
	if (!match)
		goto free_vs_tpg;

	/* Prevent new cmds from starting and accessing the tpgs/sessions */
	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
	/* Make sure cmds are not running before tearing them down. */
	vhost_scsi_flush(vs);

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		vhost_scsi_destroy_vq_cmds(vq);
	}

	/*
	 * We can now release our hold on the tpg and sessions and userspace
	 * can free them after this point.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);

		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;

		mutex_unlock(&tpg->tv_tpg_mutex);

		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}

free_vs_tpg:
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	return 0;

err_dev:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}

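/*
 * Apply a new feature set from VHOST_SET_FEATURES. All vqs are updated
 * under vq->mutex so they see the same acked_features; if
 * VHOST_F_LOG_ALL is being dropped, the per-command log buffers are
 * freed once the new features are committed.
 */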
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	bool is_log, was_log;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	if (!vs->dev.nvqs)
		goto out;

	is_log = features & (1 << VHOST_F_LOG_ALL);
	/*
	 * All vqs should have the same features.
	 */
	was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}

	/*
	 * If VHOST_F_LOG_ALL is removed, free tvc_log after
	 * vq->acked_features is committed.
	 */
	if (!is_log && was_log) {
		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			if (!vs->vqs[i].scsi_cmds)
				continue;

			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_scsi_destroy_vq_log(vq);
			mutex_unlock(&vq->mutex);
		}
	}

out:
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

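/*
 * open() of /dev/vhost-scsi creates a device with one control vq, one
 * event vq, and up to vhost_scsi_max_io_vqs I/O vqs, each I/O vq with
 * its own completion list and completion work item.
 */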
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi_virtqueue *svq;
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;

	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		goto err_vs;
	vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;

	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
		       VHOST_SCSI_MAX_IO_VQ);
		nvqs = VHOST_SCSI_MAX_IO_VQ;
	} else if (nvqs == 0) {
		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
		nvqs = 1;
	}
	nvqs += VHOST_SCSI_VQ_IO;

	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
					 GFP_KERNEL | __GFP_ZERO);
	if (!vs->old_inflight)
		goto err_inflight;

	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
				GFP_KERNEL | __GFP_ZERO);
	if (!vs->vqs)
		goto err_vqs;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_local_vqs;

	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
		svq = &vs->vqs[i];

		vqs[i] = &svq->vq;
		svq->vs = vs;
		init_llist_head(&svq->completion_list);
		vhost_work_init(&svq->completion_work,
				vhost_scsi_complete_cmd_work);
		svq->vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_local_vqs:
	kfree(vs->vqs);
err_vqs:
	kfree(vs->old_inflight);
err_inflight:
	kvfree(vs);
err_vs:
	return r;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	kfree(vs->dev.vqs);
	kfree(vs->vqs);
	kfree(vs->old_inflight);
	kvfree(vs);
	return 0;
}

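/*
 * Device ioctl entry point. A minimal userspace flow looks roughly like
 * the sketch below (illustrative only: error handling and the
 * VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup that must precede
 * VHOST_SCSI_SET_ENDPOINT are omitted, and QEMU's vhost-scsi code is
 * the authoritative user):
 *
 *	struct vhost_scsi_target t = { 0 };
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	ioctl(fd, VHOST_SET_OWNER);
 *	// ... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* for each vq ...
 *	snprintf(t.vhost_wwpn, sizeof(t.vhost_wwpn),
 *		 "naa.500140506ec7a40b");
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *	// ... guest runs ...
 *	ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);
 */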
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	case VHOST_NEW_WORKER:
	case VHOST_FREE_WORKER:
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		mutex_lock(&vs->dev.mutex);
		r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_scsi_fops = {
	.owner = THIS_MODULE,
	.release = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = vhost_scsi_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

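/*
 * Report a LUN hotplug or hotunplug to the guest as a transport-reset
 * event. Events are only queued while the backend is still set and the
 * guest negotiated VIRTIO_SCSI_F_HOTPLUG, so an event cannot be queued
 * after the flush that runs during endpoint teardown.
 */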
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	/*
	 * We can't queue events if the backend has been cleared, because
	 * we could end up queueing an event after the flush.
	 */
	if (!vhost_vq_get_backend(vq))
		goto unlock;

	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, vq, tpg, lun,
				    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
unlock:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}

static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	vhost_scsi_hotplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	vhost_scsi_hotunplug(tpg, lun);
	mutex_unlock(&tpg->tv_tpg_mutex);
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

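/*
 * Create the single I_T nexus for a TPG. The session runs in "demo
 * mode": target_setup_session() generates a struct se_node_acl for the
 * initiator name written to the configfs "nexus" attribute below.
 */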
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
		       " active TPG vhost count: %d\n",
		       tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = sysfs_emit(page, "%s\n",
			 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

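/*
 * An illustrative configfs flow (the WWNs below are examples only):
 *
 *	modprobe vhost_scsi
 *	mkdir -p /sys/kernel/config/target/vhost/naa.500140506ec7a40b/tpgt_1
 *	echo -n naa.50014051234567ab > \
 *		/sys/kernel/config/target/vhost/naa.500140506ec7a40b/tpgt_1/nexus
 *
 * Writing "NULL" removes the active nexus again.
 */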
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
		       " max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
			       " match target port protoid: %s\n", i_port,
			       vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
	       " %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};

static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);
	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}

static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

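/*
 * Parse the configfs WWN directory name ("naa.", "fc." or "iqn."
 * prefix) to choose the emulated transport, mirroring the initiator
 * side checks in vhost_scsi_tpg_nexus_store().
 */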
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport\n");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
	       " %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
		       " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
		       VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		 tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
			  " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION,
			  utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};

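/*
 * Fabric template registered with the target core. Commands submitted
 * by the I/O virtqueue handlers complete back through these callbacks.
 */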
static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module = THIS_MODULE,
	.fabric_name = "vhost",
	.max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
	.tpg_get_tag = vhost_scsi_get_tpgt,
	.tpg_check_demo_mode = vhost_scsi_check_true,
	.tpg_check_demo_mode_cache = vhost_scsi_check_true,
	.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
	.release_cmd = vhost_scsi_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.sess_get_initiator_sid = NULL,
	.write_pending = vhost_scsi_write_pending,
	.queue_data_in = vhost_scsi_queue_data_in,
	.queue_status = vhost_scsi_queue_status,
	.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
	.aborted_task = vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = vhost_scsi_make_tport,
	.fabric_drop_wwn = vhost_scsi_drop_tport,
	.fabric_make_tpg = vhost_scsi_make_tpg,
	.fabric_drop_tpg = vhost_scsi_drop_tpg,
	.fabric_post_link = vhost_scsi_port_link,
	.fabric_pre_unlink = vhost_scsi_port_unlink,

	.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,

	.default_submit_type = TARGET_QUEUE_SUBMIT,
	.direct_submit_supp = 1,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		 utsname()->machine);

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);