// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/blk_types.h>
#include <linux/bio.h>
#include <linux/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
/*
 * For the legacy descriptor case we allocate an iov per byte in the
 * virtio_scsi_cmd_resp struct.
 */
#define VHOST_SCSI_MAX_RESP_IOVS	sizeof(struct virtio_scsi_cmd_resp)
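/*
 * Worked example (assuming the default VIRTIO_SCSI_SENSE_SIZE of 96):
 * the packed virtio_scsi_cmd_resp is 4 (sense_len) + 4 (resid) +
 * 2 (status_qualifier) + 1 (status) + 1 (response) + 96 (sense) = 108
 * bytes, so in the worst legacy case the guest can hand us up to 108
 * single-byte iovecs for the response.
 */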

static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;

#ifdef CONFIG_ARCH_NO_SG_CHAIN
static int vhost_scsi_set_inline_sg_cnt(const char *buf,
					const struct kernel_param *kp)
{
	pr_err("Setting inline_sg_cnt is not supported.\n");
	return -EOPNOTSUPP;
}
#else
static int vhost_scsi_set_inline_sg_cnt(const char *buf,
					const struct kernel_param *kp)
{
	unsigned int cnt;
	int ret;

	ret = kstrtouint(buf, 10, &cnt);
	if (ret)
		return ret;

	if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
		pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
		return -EINVAL;
	}

	vhost_scsi_inline_sg_cnt = cnt;
	return 0;
}
#endif

static int vhost_scsi_get_inline_sg_cnt(char *buf,
					const struct kernel_param *kp)
{
	return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
}

static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
	.get = vhost_scsi_get_inline_sg_cnt,
	.set = vhost_scsi_set_inline_sg_cnt,
};

module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
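
/*
 * Usage sketch (the value 64 below is an arbitrary example):
 *
 *   modprobe vhost_scsi inline_sg_cnt=64
 *   echo 64 > /sys/module/vhost_scsi/parameters/inline_sg_cnt
 *
 * The 0644 mode exposes the parameter via sysfs. Each vhost_scsi device
 * keeps its own copy in vs->inline_sg_cnt (see struct vhost_scsi below),
 * so changing the value does not retroactively affect already open
 * devices.
 */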

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
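
/*
 * A sketch of the flush protocol built on this struct (see
 * vhost_scsi_init_inflight() and vhost_scsi_flush() below): each
 * virtqueue keeps two of these counters and every new command takes a
 * kref on the currently active one. A flush swaps the active counter,
 * then drops the initial reference (set to 1 by kref_init) on the old
 * counter and sleeps on its completion, which fires once every command
 * issued before the swap has released its reference.
 */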

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	u32 copied_iov:1;
	const void *read_iov;
	struct iov_iter *read_iter;
	struct scatterlist *sgl;
	struct sg_table table;
	struct scatterlist *prot_sgl;
	struct sg_table prot_table;
	/* Fast path response header iovec used when only one vec is needed */
	struct iovec tvc_resp_iov;
	/* Number of iovs for response */
	unsigned int tvc_resp_iovs_cnt;
	/* Pointer to response header iovecs if more than one is needed */
	struct iovec *tvc_resp_iovs;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/*
	 * Dirty write descriptors of this command.
	 */
	struct vhost_log *tvc_log;
	unsigned int tvc_log_num;
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
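
/*
 * Layout implied by the enum (a sketch, not normative beyond what the
 * enum encodes): vqs[0] is the control queue, vqs[1] the event queue,
 * and vqs[2] onwards are I/O queues, so a device created with the
 * default max_io_vqs of 128 supports up to 130 virtqueues in total.
 */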

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_IO_VQ	1024
#define VHOST_SCSI_MAX_EVENT	128

static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any time, one reference tracks newly submitted commands, while we
	 * wait for the other one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
	struct page **upages;

	struct vhost_work completion_work;
	struct llist_head completion_list;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */

	unsigned int inline_sg_cnt;
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct work_struct flush_work;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;

	/*
	 * Dirty write descriptors of this command.
	 */
	struct vhost_log *tmf_log;
	unsigned int tmf_log_num;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
				   struct vhost_scsi_cmd *cmd,
				   struct vhost_log *log,
				   unsigned int log_num)
{
	if (!cmd->tvc_log)
		cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
					     sizeof(*cmd->tvc_log),
					     GFP_KERNEL);

	if (unlikely(!cmd->tvc_log)) {
		vq_err(vq, "Failed to alloc tvc_log\n");
		return -ENOMEM;
	}

	memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
	cmd->tvc_log_num = log_num;

	return 0;
}

static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
				 struct vhost_log *log,
				 unsigned int log_num)
{
	if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
		return;

	if (likely(!log_num || !log))
		return;

	/*
	 * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
	 * No requirement for vq->iotlb case.
	 */
	WARN_ON_ONCE(unlikely(vq->iotlb));
	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi *vs = svq->vs;
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for_each_sgtable_sg(&tv_cmd->table, sg, i) {
			page = sg_page(sg);
			if (!page)
				continue;

			if (tv_cmd->copied_iov)
				__free_page(page);
			else
				put_page(page);
		}
		kfree(tv_cmd->read_iter);
		kfree(tv_cmd->read_iov);
		sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
			page = sg_page(sg);
			if (page)
				put_page(page);
		}
		sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
	}

	if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
		kfree(tv_cmd->tvc_resp_iovs);
	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	/*
	 * tmf->tmf_log is default NULL unless VHOST_F_LOG_ALL is set.
	 */
	kfree(tmf->tmf_log);
	kfree(tmf);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_drop_cmds(struct vhost_scsi_virtqueue *svq)
{
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;

	llnode = llist_del_all(&svq->completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list)
		vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		schedule_work(&tmf->flush_work);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
					struct vhost_scsi_virtqueue, vq);

		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
		if (!vhost_vq_work_queue(&svq->vq, &svq->completion_work))
			vhost_scsi_drop_cmds(svq);
	}
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	struct vhost_log *vq_log;
	unsigned int log_num;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	/*
	 * Reset 'log_num' since vhost_get_vq_desc() may reset it only
	 * after certain condition checks.
	 */
	log_num = 0;

	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			vq_log, &log_num);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");

	vhost_scsi_log_write(vq, vq_log, log_num);
}

static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		if (!drop)
			vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	vhost_scsi_complete_events(vs, false);
}

static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
	struct iov_iter *iter = cmd->read_iter;
	struct scatterlist *sg;
	struct page *page;
	size_t len;
	int i;

	for_each_sgtable_sg(&cmd->table, sg, i) {
		page = sg_page(sg);
		if (!page)
			continue;

		len = sg->length;

		if (copy_page_to_iter(page, 0, len, iter) != len) {
			pr_err("Could not copy %zu bytes while handling misaligned cmd\n",
			       len);
			return -1;
		}
	}

	return 0;
}
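
/*
 * Sketch of the misaligned-read path: when vhost_scsi_copy_iov_to_sgl()
 * below has had to bounce a DMA_FROM_DEVICE command through freshly
 * allocated pages, cmd->read_iter holds a duplicate of the guest's
 * iov_iter, and the helper above copies the completed data back out of
 * the bounce pages at completion time.
 */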

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi_virtqueue *svq = container_of(work,
				struct vhost_scsi_virtqueue, completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	bool signal = false;
	int ret;

	llnode = llist_del_all(&svq->completion_list);

	mutex_lock(&svq->vq.mutex);

	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			 cmd, se_cmd->residual_count, se_cmd->scsi_status);
		memset(&v_rsp, 0, sizeof(v_rsp));

		if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
			v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
		} else {
			v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
						     se_cmd->residual_count);
			/* TODO is status_qualifier field needed? */
			v_rsp.status = se_cmd->scsi_status;
			v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
							 se_cmd->scsi_sense_length);
			memcpy(v_rsp.sense, cmd->tvc_sense_buf,
			       se_cmd->scsi_sense_length);
		}

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
			      cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			signal = true;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
				     cmd->tvc_log_num);

		vhost_scsi_release_cmd_res(se_cmd);
	}

	mutex_unlock(&svq->vq.mutex);

	if (signal)
		vhost_signal(&svq->vs->dev, &svq->vq);
}

static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct scatterlist *sgl, *prot_sgl;
	struct vhost_log *log;
	int tag;

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sgl = cmd->sgl;
	prot_sgl = cmd->prot_sgl;
	log = cmd->tvc_log;
	memset(cmd, 0, sizeof(*cmd));
	cmd->sgl = sgl;
	cmd->prot_sgl = prot_sgl;
	cmd->tvc_log = log;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	return cmd;
}

static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
					     struct scatterlist *curr,
					     struct scatterlist *end)
{
	size_t revert_bytes = 0;
	struct page *page;

	while (curr != end) {
		page = sg_page(curr);

		if (page) {
			put_page(page);
			revert_bytes += curr->length;
		}
		/* Clear so we can re-use it for the copy path */
		sg_set_page(curr, NULL, 0, 0);
		curr = sg_next(curr);
	}
	iov_iter_revert(iter, revert_bytes);
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct sg_table *sg_table,
		      struct scatterlist **sgl,
		      bool is_prot)
{
	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct page **pages = svq->upages;
	struct scatterlist *sg = *sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int n, npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				    VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	while (bytes) {
		n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
		/*
		 * The block layer requires bios/requests to be a multiple of
		 * 512 bytes, but Windows can send us vecs that are misaligned.
		 * This can result in bios and later requests with misaligned
		 * sizes if we have to break up a cmd/scatterlist into multiple
		 * bios.
		 *
		 * We currently only break up a command into multiple bios if
		 * we hit the vec/seg limit, so check if our sgl_count is
		 * greater than the max and if a vec in the cmd has a
		 * misaligned offset/size.
		 */
		if (!is_prot &&
		    (offset & (SECTOR_SIZE - 1) || n & (SECTOR_SIZE - 1)) &&
		    cmd->tvc_sgl_count > BIO_MAX_VECS) {
			WARN_ONCE(true,
				  "vhost-scsi detected misaligned IO. Performance may be degraded.");
			goto revert_iter_get_pages;
		}

		sg_set_page(sg, pages[npages++], n, offset);
		sg = sg_next(sg);
		bytes -= n;
		offset = 0;
	}

	*sgl = sg;
	return npages;

revert_iter_get_pages:
	vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);

	iov_iter_revert(iter, bytes);
	while (bytes) {
		n = min_t(unsigned int, PAGE_SIZE, bytes);

		put_page(pages[npages++]);
		bytes -= n;
	}

	return -EINVAL;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter_iov(iter)) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			   struct sg_table *sg_table, int sg_count,
			   int data_dir)
{
	size_t len = iov_iter_count(iter);
	unsigned int nbytes = 0;
	struct scatterlist *sg;
	struct page *page;
	int i, ret;

	if (data_dir == DMA_FROM_DEVICE) {
		cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
		if (!cmd->read_iter)
			return -ENOMEM;

		cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
		if (!cmd->read_iov) {
			ret = -ENOMEM;
			goto free_iter;
		}
	}

	for_each_sgtable_sg(sg_table, sg, i) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			goto err;
		}

		nbytes = min_t(unsigned int, PAGE_SIZE, len);
		sg_set_page(sg, page, nbytes, 0);

		if (data_dir == DMA_TO_DEVICE &&
		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
			ret = -EFAULT;
			goto err;
		}

		len -= nbytes;
	}

	cmd->copied_iov = 1;
	return 0;

err:
	pr_err("Could not read %u bytes while handling misaligned cmd\n",
	       nbytes);

	for_each_sgtable_sg(sg_table, sg, i) {
		page = sg_page(sg);
		if (page)
			__free_page(page);
	}
	kfree(cmd->read_iov);
free_iter:
	kfree(cmd->read_iter);
	return ret;
}

static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
			  struct sg_table *sg_table, int sg_count, bool is_prot)
{
	struct scatterlist *sg = sg_table->sgl;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
		if (ret < 0) {
			vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
							 sg);
			return ret;
		}
	}

	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter, int data_dir)
{
	int sgl_count, ret;

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		cmd->prot_table.sgl = cmd->prot_sgl;
		ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
					     cmd->prot_table.sgl,
					     vs->inline_sg_cnt);
		if (ret)
			return ret;

		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
						&cmd->prot_table,
						cmd->tvc_prot_sgl_count, true);
		if (ret < 0) {
			sg_free_table_chained(&cmd->prot_table,
					      vs->inline_sg_cnt);
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	cmd->table.sgl = cmd->sgl;
	ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
				     vs->inline_sg_cnt);
	if (ret)
		return ret;

	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->table.sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
					cmd->tvc_sgl_count, false);
	if (ret == -EINVAL)
		ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
						 cmd->tvc_sgl_count, data_dir);
	if (ret < 0) {
		sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
					struct vhost_scsi_cmd *cmd,
					unsigned char *cdb, u16 lun,
					int task_attr, int data_dir,
					u32 exp_data_len)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->table.sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->prot_table.sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
			data_dir, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static void
vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		       struct vhost_scsi_ctx *vc, u8 status)
{
	struct virtio_scsi_cmd_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.status = status;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
		      sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);

	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

#define TYPE_IO_CMD	0
#define TYPE_CTRL_TMF	1
#define TYPE_CTRL_AN	2

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   struct vhost_scsi_ctx *vc, int type)
{
	union {
		struct virtio_scsi_cmd_resp cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp an;
	} rsp;
	struct iov_iter iov_iter;
	size_t rsp_size;
	int ret;

	memset(&rsp, 0, sizeof(rsp));

	if (type == TYPE_IO_CMD) {
		rsp_size = sizeof(struct virtio_scsi_cmd_resp);
		rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
	} else if (type == TYPE_CTRL_TMF) {
		rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
		rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
	} else {
		rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
		rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
	}

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
		      rsp_size);

	ret = copy_to_iter(&rsp, rsp_size, &iov_iter);

	if (likely(ret == rsp_size))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio scsi type=%d\n", type);
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc,
		    struct vhost_log *log, unsigned int *log_num)
{
	int ret = -ENXIO;

	if (likely(log_num))
		*log_num = 0;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     log, log_num);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg = NULL;

		if (vc->target) {
			/* validated at handler entry */
			vs_tpg = vhost_vq_get_backend(vq);
			tpg = READ_ONCE(vs_tpg[*vc->target]);
			if (unlikely(!tpg))
				goto out;
		}

		if (tpgp)
			*tpgp = tpg;
		ret = 0;
	}
out:
	return ret;
}

static int
vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
			   unsigned int in_iovs_cnt)
{
	int i, cnt;

	if (!in_iovs_cnt)
		return 0;
	/*
	 * Initiators normally just put the virtio_scsi_cmd_resp in the first
	 * iov, but just in case they wedged in some data with it we check for
	 * greater than or equal to the response struct.
	 */
	if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
		cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
		cmd->tvc_resp_iovs_cnt = 1;
	} else {
		/*
		 * Legacy descriptor layouts didn't specify that we must put
		 * the entire response in one iov. Worst case we have an
		 * iov per byte.
		 */
		cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
		cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
					     GFP_KERNEL);
		if (!cmd->tvc_resp_iovs)
			return -ENOMEM;

		cmd->tvc_resp_iovs_cnt = cnt;
	}

	for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
		cmd->tvc_resp_iovs[i] = in_iovs[i];

	return 0;
}

static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}
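
/*
 * Worked example: virtio-scsi carries an 8-byte LUN where byte 0 is 1,
 * byte 1 is the target, and bytes 2-3 hold a SAM-style single-level LUN
 * (0x40 | lun_hi, lun_lo) -- the mirror of the encoding used in
 * vhost_scsi_send_evt() below. So lun_buf[] = { 1, 0, 0x40, 0x05, ... }
 * decodes to LUN 5; masking with 0x3FFF strips the 0x40 address-method
 * bits.
 */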

static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_nexus *nexus;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	u8 *cdb;
	struct vhost_log *vq_log;
	unsigned int log_num;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}

		nexus = tpg->tpg_nexus;
		if (!nexus) {
			vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
			ret = -EIO;
			goto err;
		}

		cmd = vhost_scsi_get_cmd(vq, tag);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			vq_err(vq, "vhost_scsi_get_cmd failed %d\n", ret);
			goto err;
		}
		cmd->tvc_vq = vq;

		ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
		if (ret) {
			vq_err(vq, "Failed to alloc recv iovs\n");
			vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
			goto err;
		}

		if (unlikely(vq_log && log_num)) {
			ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
			if (unlikely(ret)) {
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cdb[0], lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
					       exp_data_len, &data_iter,
					       data_direction);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
					    data_direction,
					    exp_data_len + prot_bytes);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 * ENOMEM: Could not allocate resources for request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO) {
			vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
			vhost_scsi_log_write(vq, vq_log, log_num);
		} else if (ret == -ENOMEM) {
			vhost_scsi_send_status(vs, vq, &vc,
					       SAM_STAT_TASK_SET_FULL);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	mutex_lock(&tmf->svq->vq.mutex);
	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
			     tmf->tmf_log_num);
	mutex_unlock(&tmf->svq->vq.mutex);

	vhost_scsi_release_tmf_res(tmf);
}

static void vhost_scsi_tmf_flush_work(struct work_struct *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  flush_work);
	struct vhost_virtqueue *vq = &tmf->svq->vq;
	/*
	 * Make sure we have sent responses for other commands before we
	 * send our response.
	 */
	vhost_dev_flush(vq->dev);
	if (!vhost_vq_work_queue(vq, &tmf->vwork))
		vhost_scsi_release_tmf_res(tmf);
}

static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc,
		      struct vhost_log *log, unsigned int log_num)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		goto send_reject;

	INIT_WORK(&tmf->flush_work, vhost_scsi_tmf_flush_work);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (unlikely(log && log_num)) {
		tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
					     GFP_KERNEL);
		if (tmf->tmf_log) {
			memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
			tmf->tmf_log_num = log_num;
		} else {
			pr_err("vhost_scsi tmf log allocation error\n");
			vhost_scsi_release_tmf_res(tmf);
			goto send_reject;
		}
	}

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
	vhost_scsi_log_write(vq, log, log_num);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;
	struct vhost_log *vq_log;
	unsigned int log_num;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
					      vq_log, log_num);
		else {
			vhost_scsi_send_an_resp(vs, vq, &vc);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO) {
			vhost_scsi_send_bad_target(vs, vq, &vc,
						   v_req.type == VIRTIO_SCSI_T_TMF ?
						   TYPE_CTRL_TMF :
						   TYPE_CTRL_AN);
			vhost_scsi_log_write(vq, vq_log, log_num);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}
1801
1802 static void
vhost_scsi_send_evt(struct vhost_scsi * vs,struct vhost_virtqueue * vq,struct vhost_scsi_tpg * tpg,struct se_lun * lun,u32 event,u32 reason)1803 vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
1804 struct vhost_scsi_tpg *tpg, struct se_lun *lun,
1805 u32 event, u32 reason)
1806 {
1807 struct vhost_scsi_evt *evt;
1808
1809 evt = vhost_scsi_allocate_evt(vs, event, reason);
1810 if (!evt)
1811 return;
1812
1813 if (tpg && lun) {
1814 /* TODO: share lun setup code with virtio-scsi.ko */
1815 /*
1816 * Note: evt->event is zeroed when we allocate it and
1817 * lun[4-7] need to be zero according to virtio-scsi spec.
1818 */
1819 evt->event.lun[0] = 0x01;
1820 evt->event.lun[1] = tpg->tport_tpgt;
1821 if (lun->unpacked_lun >= 256)
1822 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1823 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1824 }
1825
1826 llist_add(&evt->list, &vs->vs_event_list);
1827 if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
1828 vhost_scsi_complete_events(vs, true);
1829 }
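/*
 * A worked example of the event LUN encoding above (illustrative values
 * only): a hotplug event for tpgt 5, unpacked_lun 260 yields
 *
 *   lun[0] = 0x01              virtio-scsi LUN address byte
 *   lun[1] = 0x05              target port group tag
 *   lun[2] = 0x41              (260 >> 8) | the 0x40 flag for LUNs >= 256
 *   lun[3] = 0x04              260 & 0xFF
 *   lun[4..7] = 0x00           zeroed at allocation, per the spec
 */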
1830
1831 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1832 {
1833 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1834 poll.work);
1835 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1836
1837 mutex_lock(&vq->mutex);
1838 if (!vhost_vq_get_backend(vq))
1839 goto out;
1840
1841 if (vs->vs_events_missed)
1842 vhost_scsi_send_evt(vs, vq, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT,
1843 0);
1844 out:
1845 mutex_unlock(&vq->mutex);
1846 }
1847
1848 static void vhost_scsi_handle_kick(struct vhost_work *work)
1849 {
1850 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1851 poll.work);
1852 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1853
1854 vhost_scsi_handle_vq(vs, vq);
1855 }
1856
1857 /* Callers must hold dev mutex */
1858 static void vhost_scsi_flush(struct vhost_scsi *vs)
1859 {
1860 int i;
1861
1862 /* Init new inflight and remember the old inflight */
1863 vhost_scsi_init_inflight(vs, vs->old_inflight);
1864
1865 /*
1866 * The inflight->kref was initialized to 1. We decrement it here to
1867 * indicate the start of the flush operation so that it will reach 0
1868 * when all the reqs are finished.
1869 */
1870 for (i = 0; i < vs->dev.nvqs; i++)
1871 kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);
1872
1873 /* Flush both the vhost poll and vhost work */
1874 vhost_dev_flush(&vs->dev);
1875
1876 /* Wait for all reqs issued before the flush to be finished */
1877 for (i = 0; i < vs->dev.nvqs; i++)
1878 wait_for_completion(&vs->old_inflight[i]->comp);
1879 }
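/*
 * A condensed sketch of the inflight accounting this flush relies on,
 * assuming the vhost_scsi_{get,put}_inflight helpers used by the request
 * paths earlier in this file:
 *
 *   init:        kref_init(&inflight->kref)                  count = 1
 *   per request: kref_get(&inflight->kref)                   +1 in flight
 *   completion:  kref_put(&kref, vhost_scsi_done_inflight)   -1 when done
 *   flush:       kref_put(&kref, vhost_scsi_done_inflight)   drop init ref
 *
 * vhost_scsi_done_inflight() completes inflight->comp only when the count
 * hits zero, so wait_for_completion() above returns once every request
 * issued before the flush has finished.
 */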
1880
1881 static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
1882 {
1883 struct vhost_scsi_virtqueue *svq = container_of(vq,
1884 struct vhost_scsi_virtqueue, vq);
1885 struct vhost_scsi_cmd *tv_cmd;
1886 unsigned int i;
1887
1888 if (!svq->scsi_cmds)
1889 return;
1890
1891 for (i = 0; i < svq->max_cmds; i++) {
1892 tv_cmd = &svq->scsi_cmds[i];
1893 kfree(tv_cmd->tvc_log);
1894 tv_cmd->tvc_log = NULL;
1895 tv_cmd->tvc_log_num = 0;
1896 }
1897 }
1898
1899 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
1900 {
1901 struct vhost_scsi_virtqueue *svq = container_of(vq,
1902 struct vhost_scsi_virtqueue, vq);
1903 struct vhost_scsi_cmd *tv_cmd;
1904 unsigned int i;
1905
1906 if (!svq->scsi_cmds)
1907 return;
1908
1909 for (i = 0; i < svq->max_cmds; i++) {
1910 tv_cmd = &svq->scsi_cmds[i];
1911
1912 kfree(tv_cmd->sgl);
1913 kfree(tv_cmd->prot_sgl);
1914 }
1915
1916 sbitmap_free(&svq->scsi_tags);
1917 kfree(svq->upages);
1918 vhost_scsi_destroy_vq_log(vq);
1919 kfree(svq->scsi_cmds);
1920 svq->scsi_cmds = NULL;
1921 }
1922
1923 static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
1924 {
1925 struct vhost_scsi_virtqueue *svq = container_of(vq,
1926 struct vhost_scsi_virtqueue, vq);
1927 struct vhost_scsi *vs = svq->vs;
1928 struct vhost_scsi_cmd *tv_cmd;
1929 unsigned int i;
1930
1931 if (svq->scsi_cmds)
1932 return 0;
1933
1934 if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
1935 NUMA_NO_NODE, false, true))
1936 return -ENOMEM;
1937 svq->max_cmds = max_cmds;
1938
1939 svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
1940 if (!svq->scsi_cmds) {
1941 sbitmap_free(&svq->scsi_tags);
1942 return -ENOMEM;
1943 }
1944
1945 svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
1946 GFP_KERNEL);
1947 if (!svq->upages)
1948 goto out;
1949
1950 for (i = 0; i < max_cmds; i++) {
1951 tv_cmd = &svq->scsi_cmds[i];
1952
1953 if (vs->inline_sg_cnt) {
1954 tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
1955 sizeof(struct scatterlist),
1956 GFP_KERNEL);
1957 if (!tv_cmd->sgl) {
1958 pr_err("Unable to allocate tv_cmd->sgl\n");
1959 goto out;
1960 }
1961 }
1962
1963 if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
1964 vs->inline_sg_cnt) {
1965 tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
1966 sizeof(struct scatterlist),
1967 GFP_KERNEL);
1968 if (!tv_cmd->prot_sgl) {
1969 pr_err("Unable to allocate tv_cmd->prot_sgl\n");
1970 goto out;
1971 }
1972 }
1973 }
1974 return 0;
1975 out:
1976 vhost_scsi_destroy_vq_cmds(vq);
1977 return -ENOMEM;
1978 }
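/*
 * A minimal sketch of how the I/O path is expected to consume this pool
 * (illustrative; see the command get/release helpers earlier in this file
 * for the authoritative version):
 *
 *   tag = sbitmap_get(&svq->scsi_tags);
 *   if (tag < 0)
 *           return ERR_PTR(-ENOMEM);          // pool exhausted
 *   cmd = &svq->scsi_cmds[tag];
 *   ...submit and complete the command...
 *   sbitmap_clear_bit(&svq->scsi_tags, tag);  // return the tag to the pool
 */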
1979
1980 /*
1981 * Called from vhost_scsi_ioctl() context to walk the list of available
1982 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1983 *
1984 * The lock nesting rule is:
1985 * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1986 */
1987 static int
1988 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1989 struct vhost_scsi_target *t)
1990 {
1991 struct se_portal_group *se_tpg;
1992 struct vhost_scsi_tport *tv_tport;
1993 struct vhost_scsi_tpg *tpg;
1994 struct vhost_scsi_tpg **vs_tpg;
1995 struct vhost_virtqueue *vq;
1996 int index, ret, i, len;
1997 bool match = false;
1998
1999 mutex_lock(&vs->dev.mutex);
2000
2001 	/* Verify that the rings have been set up correctly. */
2002 	for (index = 0; index < vs->dev.nvqs; ++index) {
2004 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
2005 ret = -EFAULT;
2006 goto out;
2007 }
2008 }
2009
2010 if (vs->vs_tpg) {
2011 pr_err("vhost-scsi endpoint already set for %s.\n",
2012 vs->vs_vhost_wwpn);
2013 ret = -EEXIST;
2014 goto out;
2015 }
2016
2017 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
2018 vs_tpg = kzalloc(len, GFP_KERNEL);
2019 if (!vs_tpg) {
2020 ret = -ENOMEM;
2021 goto out;
2022 }
2023
2024 mutex_lock(&vhost_scsi_mutex);
2025 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
2026 mutex_lock(&tpg->tv_tpg_mutex);
2027 if (!tpg->tpg_nexus) {
2028 mutex_unlock(&tpg->tv_tpg_mutex);
2029 continue;
2030 }
2031 if (tpg->tv_tpg_vhost_count != 0) {
2032 mutex_unlock(&tpg->tv_tpg_mutex);
2033 continue;
2034 }
2035 tv_tport = tpg->tport;
2036
2037 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
2038 /*
2039 * In order to ensure individual vhost-scsi configfs
2040 * groups cannot be removed while in use by vhost ioctl,
2041 * go ahead and take an explicit se_tpg->tpg_group.cg_item
2042 * dependency now.
2043 */
2044 se_tpg = &tpg->se_tpg;
2045 ret = target_depend_item(&se_tpg->tpg_group.cg_item);
2046 if (ret) {
2047 pr_warn("target_depend_item() failed: %d\n", ret);
2048 mutex_unlock(&tpg->tv_tpg_mutex);
2049 mutex_unlock(&vhost_scsi_mutex);
2050 goto undepend;
2051 }
2052 tpg->tv_tpg_vhost_count++;
2053 tpg->vhost_scsi = vs;
2054 vs_tpg[tpg->tport_tpgt] = tpg;
2055 match = true;
2056 }
2057 mutex_unlock(&tpg->tv_tpg_mutex);
2058 }
2059 mutex_unlock(&vhost_scsi_mutex);
2060
2061 if (match) {
2062 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
2063 sizeof(vs->vs_vhost_wwpn));
2064
2065 for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
2066 vq = &vs->vqs[i].vq;
2067 if (!vhost_vq_is_setup(vq))
2068 continue;
2069
2070 ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
2071 if (ret)
2072 goto destroy_vq_cmds;
2073 }
2074
2075 for (i = 0; i < vs->dev.nvqs; i++) {
2076 vq = &vs->vqs[i].vq;
2077 mutex_lock(&vq->mutex);
2078 vhost_vq_set_backend(vq, vs_tpg);
2079 vhost_vq_init_access(vq);
2080 mutex_unlock(&vq->mutex);
2081 }
2082 ret = 0;
2083 } else {
2084 ret = -ENODEV;
2085 goto free_tpg;
2086 }
2087
2088 /*
2089 * Act as synchronize_rcu to make sure requests after this point
2090 * see a fully setup device.
2091 */
2092 vhost_scsi_flush(vs);
2093 vs->vs_tpg = vs_tpg;
2094 goto out;
2095
2096 destroy_vq_cmds:
2097 for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
2098 if (!vhost_vq_get_backend(&vs->vqs[i].vq))
2099 vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
2100 }
2101 undepend:
2102 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2103 tpg = vs_tpg[i];
2104 if (tpg) {
2105 mutex_lock(&tpg->tv_tpg_mutex);
2106 tpg->vhost_scsi = NULL;
2107 tpg->tv_tpg_vhost_count--;
2108 mutex_unlock(&tpg->tv_tpg_mutex);
2109 target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
2110 }
2111 }
2112 free_tpg:
2113 kfree(vs_tpg);
2114 out:
2115 mutex_unlock(&vs->dev.mutex);
2116 return ret;
2117 }
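/*
 * A minimal userspace sketch of endpoint setup (illustrative WWPN; error
 * handling omitted). The vrings must be configured before this ioctl so
 * the vhost_vq_access_ok() checks above pass:
 *
 *   struct vhost_scsi_target t = {
 *           .abi_version = VHOST_SCSI_ABI_VERSION,
 *           .vhost_tpgt  = 1,
 *   };
 *   strncpy(t.vhost_wwpn, "naa.600140554cf3a18e", sizeof(t.vhost_wwpn));
 *
 *   int fd = open("/dev/vhost-scsi", O_RDWR);
 *   ioctl(fd, VHOST_SET_OWNER, NULL);
 *   ...VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* for each queue...
 *   ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */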
2118
2119 static int
2120 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
2121 struct vhost_scsi_target *t)
2122 {
2123 struct se_portal_group *se_tpg;
2124 struct vhost_scsi_tport *tv_tport;
2125 struct vhost_scsi_tpg *tpg;
2126 struct vhost_virtqueue *vq;
2127 bool match = false;
2128 int index, ret, i;
2129 u8 target;
2130
2131 mutex_lock(&vs->dev.mutex);
2132 	/* Verify that the rings have been set up correctly. */
2133 for (index = 0; index < vs->dev.nvqs; ++index) {
2134 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
2135 ret = -EFAULT;
2136 goto err_dev;
2137 }
2138 }
2139
2140 if (!vs->vs_tpg) {
2141 ret = 0;
2142 goto err_dev;
2143 }
2144
2145 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2146 target = i;
2147 tpg = vs->vs_tpg[target];
2148 if (!tpg)
2149 continue;
2150
2151 tv_tport = tpg->tport;
2152 if (!tv_tport) {
2153 ret = -ENODEV;
2154 goto err_dev;
2155 }
2156
2157 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
2158 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
2159 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
2160 tv_tport->tport_name, tpg->tport_tpgt,
2161 t->vhost_wwpn, t->vhost_tpgt);
2162 ret = -EINVAL;
2163 goto err_dev;
2164 }
2165 match = true;
2166 }
2167 if (!match)
2168 goto free_vs_tpg;
2169
2170 /* Prevent new cmds from starting and accessing the tpgs/sessions */
2171 for (i = 0; i < vs->dev.nvqs; i++) {
2172 vq = &vs->vqs[i].vq;
2173 mutex_lock(&vq->mutex);
2174 vhost_vq_set_backend(vq, NULL);
2175 mutex_unlock(&vq->mutex);
2176 }
2177 /* Make sure cmds are not running before tearing them down. */
2178 vhost_scsi_flush(vs);
2179
2180 for (i = 0; i < vs->dev.nvqs; i++) {
2181 vq = &vs->vqs[i].vq;
2182 vhost_scsi_destroy_vq_cmds(vq);
2183 }
2184
2185 /*
2186 * We can now release our hold on the tpg and sessions and userspace
2187 * can free them after this point.
2188 */
2189 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
2190 target = i;
2191 tpg = vs->vs_tpg[target];
2192 if (!tpg)
2193 continue;
2194
2195 mutex_lock(&tpg->tv_tpg_mutex);
2196
2197 tpg->tv_tpg_vhost_count--;
2198 tpg->vhost_scsi = NULL;
2199 vs->vs_tpg[target] = NULL;
2200
2201 mutex_unlock(&tpg->tv_tpg_mutex);
2202
2203 se_tpg = &tpg->se_tpg;
2204 target_undepend_item(&se_tpg->tpg_group.cg_item);
2205 }
2206
2207 free_vs_tpg:
2208 /*
2209 * Act as synchronize_rcu to make sure access to
2210 * old vs->vs_tpg is finished.
2211 */
2212 vhost_scsi_flush(vs);
2213 kfree(vs->vs_tpg);
2214 vs->vs_tpg = NULL;
2215 memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
2216 WARN_ON(vs->vs_events_nr);
2217 mutex_unlock(&vs->dev.mutex);
2218 return 0;
2219
2220 err_dev:
2221 mutex_unlock(&vs->dev.mutex);
2222 return ret;
2223 }
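/*
 * Teardown ordering above, in brief: clear each vq backend under
 * vq->mutex (no new commands can start), vhost_scsi_flush() (in-flight
 * commands drain), destroy the per-vq command pools, then drop the
 * configfs dependencies so userspace may remove the tpg/nexus.
 * Reordering these steps could let a late request touch a freed tpg or
 * session.
 */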
2224
2225 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
2226 {
2227 struct vhost_virtqueue *vq;
2228 bool is_log, was_log;
2229 int i;
2230
2231 if (features & ~VHOST_SCSI_FEATURES)
2232 return -EOPNOTSUPP;
2233
2234 mutex_lock(&vs->dev.mutex);
2235 if ((features & (1 << VHOST_F_LOG_ALL)) &&
2236 !vhost_log_access_ok(&vs->dev)) {
2237 mutex_unlock(&vs->dev.mutex);
2238 return -EFAULT;
2239 }
2240
2241 if (!vs->dev.nvqs)
2242 goto out;
2243
2244 is_log = features & (1 << VHOST_F_LOG_ALL);
2245 /*
2246 	 * All VQs should have the same features.
2247 */
2248 was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
2249
2250 for (i = 0; i < vs->dev.nvqs; i++) {
2251 vq = &vs->vqs[i].vq;
2252 mutex_lock(&vq->mutex);
2253 vq->acked_features = features;
2254 mutex_unlock(&vq->mutex);
2255 }
2256
2257 /*
2258 * If VHOST_F_LOG_ALL is removed, free tvc_log after
2259 * vq->acked_features is committed.
2260 */
2261 if (!is_log && was_log) {
2262 for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
2263 if (!vs->vqs[i].scsi_cmds)
2264 continue;
2265
2266 vq = &vs->vqs[i].vq;
2267 mutex_lock(&vq->mutex);
2268 vhost_scsi_destroy_vq_log(vq);
2269 mutex_unlock(&vq->mutex);
2270 }
2271 }
2272
2273 out:
2274 mutex_unlock(&vs->dev.mutex);
2275 return 0;
2276 }
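/*
 * Feature negotiation sketch from userspace (illustrative; error handling
 * omitted):
 *
 *   __u64 features;
 *   ioctl(fd, VHOST_GET_FEATURES, &features);
 *   features &= ~(1ULL << VHOST_F_LOG_ALL);   // e.g. disable dirty logging
 *   ioctl(fd, VHOST_SET_FEATURES, &features); // lands in the code above
 */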
2277
2278 static int vhost_scsi_open(struct inode *inode, struct file *f)
2279 {
2280 struct vhost_scsi_virtqueue *svq;
2281 struct vhost_scsi *vs;
2282 struct vhost_virtqueue **vqs;
2283 int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
2284
2285 vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
2286 if (!vs)
2287 goto err_vs;
2288 vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;
2289
2290 if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
2291 pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
2292 VHOST_SCSI_MAX_IO_VQ);
2293 nvqs = VHOST_SCSI_MAX_IO_VQ;
2294 } else if (nvqs == 0) {
2295 pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
2296 nvqs = 1;
2297 }
2298 nvqs += VHOST_SCSI_VQ_IO;
2299
2300 vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
2301 GFP_KERNEL | __GFP_ZERO);
2302 if (!vs->old_inflight)
2303 goto err_inflight;
2304
2305 vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
2306 GFP_KERNEL | __GFP_ZERO);
2307 if (!vs->vqs)
2308 goto err_vqs;
2309
2310 vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
2311 if (!vqs)
2312 goto err_local_vqs;
2313
2314 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
2315
2316 vs->vs_events_nr = 0;
2317 vs->vs_events_missed = false;
2318
2319 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
2320 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2321 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
2322 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
2323 for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
2324 svq = &vs->vqs[i];
2325
2326 vqs[i] = &svq->vq;
2327 svq->vs = vs;
2328 init_llist_head(&svq->completion_list);
2329 vhost_work_init(&svq->completion_work,
2330 vhost_scsi_complete_cmd_work);
2331 svq->vq.handle_kick = vhost_scsi_handle_kick;
2332 }
2333 vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
2334 VHOST_SCSI_WEIGHT, 0, true, NULL);
2335
2336 vhost_scsi_init_inflight(vs, NULL);
2337
2338 f->private_data = vs;
2339 return 0;
2340
2341 err_local_vqs:
2342 kfree(vs->vqs);
2343 err_vqs:
2344 kfree(vs->old_inflight);
2345 err_inflight:
2346 kvfree(vs);
2347 err_vs:
2348 return r;
2349 }
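/*
 * Example (illustrative): loading the module with
 *
 *   modprobe vhost_scsi max_io_vqs=8
 *
 * makes each open of /dev/vhost-scsi allocate 8 I/O virtqueues plus the
 * fixed control and event queues (VHOST_SCSI_VQ_IO of them), assuming the
 * max_io_vqs module parameter defined earlier in this file.
 */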
2350
2351 static int vhost_scsi_release(struct inode *inode, struct file *f)
2352 {
2353 struct vhost_scsi *vs = f->private_data;
2354 struct vhost_scsi_target t;
2355
2356 mutex_lock(&vs->dev.mutex);
2357 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
2358 mutex_unlock(&vs->dev.mutex);
2359 vhost_scsi_clear_endpoint(vs, &t);
2360 vhost_dev_stop(&vs->dev);
2361 vhost_dev_cleanup(&vs->dev);
2362 kfree(vs->dev.vqs);
2363 kfree(vs->vqs);
2364 kfree(vs->old_inflight);
2365 kvfree(vs);
2366 return 0;
2367 }
2368
2369 static long
2370 vhost_scsi_ioctl(struct file *f,
2371 unsigned int ioctl,
2372 unsigned long arg)
2373 {
2374 struct vhost_scsi *vs = f->private_data;
2375 struct vhost_scsi_target backend;
2376 void __user *argp = (void __user *)arg;
2377 u64 __user *featurep = argp;
2378 u32 __user *eventsp = argp;
2379 u32 events_missed;
2380 u64 features;
2381 int r, abi_version = VHOST_SCSI_ABI_VERSION;
2382 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2383
2384 switch (ioctl) {
2385 case VHOST_SCSI_SET_ENDPOINT:
2386 if (copy_from_user(&backend, argp, sizeof backend))
2387 return -EFAULT;
2388 if (backend.reserved != 0)
2389 return -EOPNOTSUPP;
2390
2391 return vhost_scsi_set_endpoint(vs, &backend);
2392 case VHOST_SCSI_CLEAR_ENDPOINT:
2393 if (copy_from_user(&backend, argp, sizeof backend))
2394 return -EFAULT;
2395 if (backend.reserved != 0)
2396 return -EOPNOTSUPP;
2397
2398 return vhost_scsi_clear_endpoint(vs, &backend);
2399 case VHOST_SCSI_GET_ABI_VERSION:
2400 if (copy_to_user(argp, &abi_version, sizeof abi_version))
2401 return -EFAULT;
2402 return 0;
2403 case VHOST_SCSI_SET_EVENTS_MISSED:
2404 if (get_user(events_missed, eventsp))
2405 return -EFAULT;
2406 mutex_lock(&vq->mutex);
2407 vs->vs_events_missed = events_missed;
2408 mutex_unlock(&vq->mutex);
2409 return 0;
2410 case VHOST_SCSI_GET_EVENTS_MISSED:
2411 mutex_lock(&vq->mutex);
2412 events_missed = vs->vs_events_missed;
2413 mutex_unlock(&vq->mutex);
2414 if (put_user(events_missed, eventsp))
2415 return -EFAULT;
2416 return 0;
2417 case VHOST_GET_FEATURES:
2418 features = VHOST_SCSI_FEATURES;
2419 if (copy_to_user(featurep, &features, sizeof features))
2420 return -EFAULT;
2421 return 0;
2422 case VHOST_SET_FEATURES:
2423 if (copy_from_user(&features, featurep, sizeof features))
2424 return -EFAULT;
2425 return vhost_scsi_set_features(vs, features);
2426 case VHOST_NEW_WORKER:
2427 case VHOST_FREE_WORKER:
2428 case VHOST_ATTACH_VRING_WORKER:
2429 case VHOST_GET_VRING_WORKER:
2430 mutex_lock(&vs->dev.mutex);
2431 r = vhost_worker_ioctl(&vs->dev, ioctl, argp);
2432 mutex_unlock(&vs->dev.mutex);
2433 return r;
2434 default:
2435 mutex_lock(&vs->dev.mutex);
2436 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
2437 /* TODO: flush backend after dev ioctl. */
2438 if (r == -ENOIOCTLCMD)
2439 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
2440 mutex_unlock(&vs->dev.mutex);
2441 return r;
2442 }
2443 }
2444
2445 static const struct file_operations vhost_scsi_fops = {
2446 .owner = THIS_MODULE,
2447 .release = vhost_scsi_release,
2448 .unlocked_ioctl = vhost_scsi_ioctl,
2449 .compat_ioctl = compat_ptr_ioctl,
2450 .open = vhost_scsi_open,
2451 .llseek = noop_llseek,
2452 };
2453
2454 static struct miscdevice vhost_scsi_misc = {
2455 MISC_DYNAMIC_MINOR,
2456 "vhost-scsi",
2457 &vhost_scsi_fops,
2458 };
2459
2460 static int __init vhost_scsi_register(void)
2461 {
2462 return misc_register(&vhost_scsi_misc);
2463 }
2464
2465 static void vhost_scsi_deregister(void)
2466 {
2467 misc_deregister(&vhost_scsi_misc);
2468 }
2469
2470 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
2471 {
2472 switch (tport->tport_proto_id) {
2473 case SCSI_PROTOCOL_SAS:
2474 return "SAS";
2475 case SCSI_PROTOCOL_FCP:
2476 return "FCP";
2477 case SCSI_PROTOCOL_ISCSI:
2478 return "iSCSI";
2479 default:
2480 break;
2481 }
2482
2483 return "Unknown";
2484 }
2485
2486 static void
2487 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
2488 struct se_lun *lun, bool plug)
2489 {
2490
2491 struct vhost_scsi *vs = tpg->vhost_scsi;
2492 struct vhost_virtqueue *vq;
2493 u32 reason;
2494
2495 if (!vs)
2496 return;
2497
2498 if (plug)
2499 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
2500 else
2501 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
2502
2503 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
2504 mutex_lock(&vq->mutex);
2505 /*
2506 * We can't queue events if the backend has been cleared, because
2507 * we could end up queueing an event after the flush.
2508 */
2509 if (!vhost_vq_get_backend(vq))
2510 goto unlock;
2511
2512 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
2513 vhost_scsi_send_evt(vs, vq, tpg, lun,
2514 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
2515 unlock:
2516 mutex_unlock(&vq->mutex);
2517 }
2518
2519 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2520 {
2521 vhost_scsi_do_plug(tpg, lun, true);
2522 }
2523
2524 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2525 {
2526 vhost_scsi_do_plug(tpg, lun, false);
2527 }
2528
2529 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
2530 struct se_lun *lun)
2531 {
2532 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2533 struct vhost_scsi_tpg, se_tpg);
2534
2535 mutex_lock(&tpg->tv_tpg_mutex);
2536 tpg->tv_tpg_port_count++;
2537 vhost_scsi_hotplug(tpg, lun);
2538 mutex_unlock(&tpg->tv_tpg_mutex);
2539
2540 return 0;
2541 }
2542
2543 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
2544 struct se_lun *lun)
2545 {
2546 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2547 struct vhost_scsi_tpg, se_tpg);
2548
2549 mutex_lock(&tpg->tv_tpg_mutex);
2550 tpg->tv_tpg_port_count--;
2551 vhost_scsi_hotunplug(tpg, lun);
2552 mutex_unlock(&tpg->tv_tpg_mutex);
2553 }
2554
2555 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2556 struct config_item *item, const char *page, size_t count)
2557 {
2558 struct se_portal_group *se_tpg = attrib_to_tpg(item);
2559 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2560 struct vhost_scsi_tpg, se_tpg);
2561 unsigned long val;
2562 int ret = kstrtoul(page, 0, &val);
2563
2564 if (ret) {
2565 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2566 return ret;
2567 }
2568 if (val != 0 && val != 1 && val != 3) {
2569 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2570 return -EINVAL;
2571 }
2572 tpg->tv_fabric_prot_type = val;
2573
2574 return count;
2575 }
2576
2577 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2578 struct config_item *item, char *page)
2579 {
2580 struct se_portal_group *se_tpg = attrib_to_tpg(item);
2581 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2582 struct vhost_scsi_tpg, se_tpg);
2583
2584 return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
2585 }
2586
2587 CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2588
2589 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2590 &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
2591 NULL,
2592 };
2593
2594 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2595 const char *name)
2596 {
2597 struct vhost_scsi_nexus *tv_nexus;
2598
2599 mutex_lock(&tpg->tv_tpg_mutex);
2600 if (tpg->tpg_nexus) {
2601 mutex_unlock(&tpg->tv_tpg_mutex);
2602 pr_debug("tpg->tpg_nexus already exists\n");
2603 return -EEXIST;
2604 }
2605
2606 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2607 if (!tv_nexus) {
2608 mutex_unlock(&tpg->tv_tpg_mutex);
2609 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2610 return -ENOMEM;
2611 }
2612 /*
2613 * Since we are running in 'demo mode' this call will generate a
2614 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2615 * the SCSI Initiator port name of the passed configfs group 'name'.
2616 */
2617 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2618 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
2619 (unsigned char *)name, tv_nexus, NULL);
2620 if (IS_ERR(tv_nexus->tvn_se_sess)) {
2621 mutex_unlock(&tpg->tv_tpg_mutex);
2622 kfree(tv_nexus);
2623 return -ENOMEM;
2624 }
2625 tpg->tpg_nexus = tv_nexus;
2626
2627 mutex_unlock(&tpg->tv_tpg_mutex);
2628 return 0;
2629 }
2630
2631 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2632 {
2633 struct se_session *se_sess;
2634 struct vhost_scsi_nexus *tv_nexus;
2635
2636 mutex_lock(&tpg->tv_tpg_mutex);
2637 tv_nexus = tpg->tpg_nexus;
2638 if (!tv_nexus) {
2639 mutex_unlock(&tpg->tv_tpg_mutex);
2640 return -ENODEV;
2641 }
2642
2643 se_sess = tv_nexus->tvn_se_sess;
2644 if (!se_sess) {
2645 mutex_unlock(&tpg->tv_tpg_mutex);
2646 return -ENODEV;
2647 }
2648
2649 if (tpg->tv_tpg_port_count != 0) {
2650 mutex_unlock(&tpg->tv_tpg_mutex);
2651 pr_err("Unable to remove TCM_vhost I_T Nexus with"
2652 " active TPG port count: %d\n",
2653 tpg->tv_tpg_port_count);
2654 return -EBUSY;
2655 }
2656
2657 if (tpg->tv_tpg_vhost_count != 0) {
2658 mutex_unlock(&tpg->tv_tpg_mutex);
2659 pr_err("Unable to remove TCM_vhost I_T Nexus with"
2660 " active TPG vhost count: %d\n",
2661 tpg->tv_tpg_vhost_count);
2662 return -EBUSY;
2663 }
2664
2665 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
2666 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2667 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2668
2669 /*
2670 * Release the SCSI I_T Nexus to the emulated vhost Target Port
2671 */
2672 target_remove_session(se_sess);
2673 tpg->tpg_nexus = NULL;
2674 mutex_unlock(&tpg->tv_tpg_mutex);
2675
2676 kfree(tv_nexus);
2677 return 0;
2678 }
2679
2680 static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
2681 {
2682 struct se_portal_group *se_tpg = to_tpg(item);
2683 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2684 struct vhost_scsi_tpg, se_tpg);
2685 struct vhost_scsi_nexus *tv_nexus;
2686 ssize_t ret;
2687
2688 mutex_lock(&tpg->tv_tpg_mutex);
2689 tv_nexus = tpg->tpg_nexus;
2690 if (!tv_nexus) {
2691 mutex_unlock(&tpg->tv_tpg_mutex);
2692 return -ENODEV;
2693 }
2694 ret = sysfs_emit(page, "%s\n",
2695 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
2696 mutex_unlock(&tpg->tv_tpg_mutex);
2697
2698 return ret;
2699 }
2700
2701 static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2702 const char *page, size_t count)
2703 {
2704 struct se_portal_group *se_tpg = to_tpg(item);
2705 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2706 struct vhost_scsi_tpg, se_tpg);
2707 struct vhost_scsi_tport *tport_wwn = tpg->tport;
2708 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2709 int ret;
2710 /*
2711 	 * Shut down the active I_T nexus if 'NULL' is passed.
2712 */
2713 if (!strncmp(page, "NULL", 4)) {
2714 ret = vhost_scsi_drop_nexus(tpg);
2715 return (!ret) ? count : ret;
2716 }
2717 /*
2718 * Otherwise make sure the passed virtual Initiator port WWN matches
2719 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2720 * vhost_scsi_make_nexus().
2721 */
2722 if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2723 		pr_err("Emulated NAA SAS Address: %s exceeds"
2724 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2725 return -EINVAL;
2726 }
2727 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2728
2729 ptr = strstr(i_port, "naa.");
2730 if (ptr) {
2731 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2732 pr_err("Passed SAS Initiator Port %s does not"
2733 " match target port protoid: %s\n", i_port,
2734 vhost_scsi_dump_proto_id(tport_wwn));
2735 return -EINVAL;
2736 }
2737 port_ptr = &i_port[0];
2738 goto check_newline;
2739 }
2740 ptr = strstr(i_port, "fc.");
2741 if (ptr) {
2742 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2743 pr_err("Passed FCP Initiator Port %s does not"
2744 " match target port protoid: %s\n", i_port,
2745 vhost_scsi_dump_proto_id(tport_wwn));
2746 return -EINVAL;
2747 }
2748 port_ptr = &i_port[3]; /* Skip over "fc." */
2749 goto check_newline;
2750 }
2751 ptr = strstr(i_port, "iqn.");
2752 if (ptr) {
2753 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2754 pr_err("Passed iSCSI Initiator Port %s does not"
2755 " match target port protoid: %s\n", i_port,
2756 vhost_scsi_dump_proto_id(tport_wwn));
2757 return -EINVAL;
2758 }
2759 port_ptr = &i_port[0];
2760 goto check_newline;
2761 }
2762 pr_err("Unable to locate prefix for emulated Initiator Port:"
2763 " %s\n", i_port);
2764 return -EINVAL;
2765 /*
2766 * Clear any trailing newline for the NAA WWN
2767 */
2768 check_newline:
2769 if (i_port[strlen(i_port)-1] == '\n')
2770 i_port[strlen(i_port)-1] = '\0';
2771
2772 ret = vhost_scsi_make_nexus(tpg, port_ptr);
2773 if (ret < 0)
2774 return ret;
2775
2776 return count;
2777 }
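/*
 * Configfs usage sketch for the nexus attribute (illustrative WWNs):
 *
 *   echo -n naa.60014053c2d33f09 > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * establishes the I_T nexus; writing the literal string "NULL" drops it
 * via vhost_scsi_drop_nexus() as handled above.
 */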
2778
2779 CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
2780
2781 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2782 &vhost_scsi_tpg_attr_nexus,
2783 NULL,
2784 };
2785
2786 static struct se_portal_group *
2787 vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
2788 {
2789 struct vhost_scsi_tport *tport = container_of(wwn,
2790 struct vhost_scsi_tport, tport_wwn);
2791
2792 struct vhost_scsi_tpg *tpg;
2793 u16 tpgt;
2794 int ret;
2795
2796 if (strstr(name, "tpgt_") != name)
2797 return ERR_PTR(-EINVAL);
2798 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2799 return ERR_PTR(-EINVAL);
2800
2801 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2802 if (!tpg) {
2803 		pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2804 return ERR_PTR(-ENOMEM);
2805 }
2806 mutex_init(&tpg->tv_tpg_mutex);
2807 INIT_LIST_HEAD(&tpg->tv_tpg_list);
2808 tpg->tport = tport;
2809 tpg->tport_tpgt = tpgt;
2810
2811 ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2812 if (ret < 0) {
2813 kfree(tpg);
2814 return NULL;
2815 }
2816 mutex_lock(&vhost_scsi_mutex);
2817 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2818 mutex_unlock(&vhost_scsi_mutex);
2819
2820 return &tpg->se_tpg;
2821 }
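/*
 * Reached via configfs when the TPG directory is created, e.g.
 * (illustrative WWN):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *
 * The "tpgt_" prefix and the 16-bit tag limit are enforced above.
 */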
2822
2823 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2824 {
2825 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2826 struct vhost_scsi_tpg, se_tpg);
2827
2828 mutex_lock(&vhost_scsi_mutex);
2829 list_del(&tpg->tv_tpg_list);
2830 mutex_unlock(&vhost_scsi_mutex);
2831 /*
2832 * Release the virtual I_T Nexus for this vhost TPG
2833 */
2834 vhost_scsi_drop_nexus(tpg);
2835 /*
2836 	 * Deregister the se_tpg from TCM.
2837 */
2838 core_tpg_deregister(se_tpg);
2839 kfree(tpg);
2840 }
2841
2842 static struct se_wwn *
2843 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2844 struct config_group *group,
2845 const char *name)
2846 {
2847 struct vhost_scsi_tport *tport;
2848 char *ptr;
2849 u64 wwpn = 0;
2850 int off = 0;
2851
2852 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2853 return ERR_PTR(-EINVAL); */
2854
2855 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2856 if (!tport) {
2857 		pr_err("Unable to allocate struct vhost_scsi_tport\n");
2858 return ERR_PTR(-ENOMEM);
2859 }
2860 tport->tport_wwpn = wwpn;
2861 /*
2862 * Determine the emulated Protocol Identifier and Target Port Name
2863 * based on the incoming configfs directory name.
2864 */
2865 ptr = strstr(name, "naa.");
2866 if (ptr) {
2867 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2868 goto check_len;
2869 }
2870 ptr = strstr(name, "fc.");
2871 if (ptr) {
2872 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2873 off = 3; /* Skip over "fc." */
2874 goto check_len;
2875 }
2876 ptr = strstr(name, "iqn.");
2877 if (ptr) {
2878 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2879 goto check_len;
2880 }
2881
2882 pr_err("Unable to locate prefix for emulated Target Port:"
2883 " %s\n", name);
2884 kfree(tport);
2885 return ERR_PTR(-EINVAL);
2886
2887 check_len:
2888 if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2889 		pr_err("Emulated %s Address: %s exceeds"
2890 " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2891 VHOST_SCSI_NAMELEN);
2892 kfree(tport);
2893 return ERR_PTR(-EINVAL);
2894 }
2895 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2896
2897 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2898 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2899
2900 return &tport->tport_wwn;
2901 }
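/*
 * Reached via configfs when the WWN directory is created, e.g.
 * (illustrative):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 *
 * The name prefix (naa./fc./iqn.) selects the emulated SCSI protocol,
 * as parsed above.
 */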
2902
2903 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2904 {
2905 struct vhost_scsi_tport *tport = container_of(wwn,
2906 struct vhost_scsi_tport, tport_wwn);
2907
2908 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2909 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2910 tport->tport_name);
2911
2912 kfree(tport);
2913 }
2914
2915 static ssize_t
2916 vhost_scsi_wwn_version_show(struct config_item *item, char *page)
2917 {
2918 return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
2919 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2920 utsname()->machine);
2921 }
2922
2923 CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
2924
2925 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2926 &vhost_scsi_wwn_attr_version,
2927 NULL,
2928 };
2929
2930 static const struct target_core_fabric_ops vhost_scsi_ops = {
2931 .module = THIS_MODULE,
2932 .fabric_name = "vhost",
2933 .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
2934 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2935 .tpg_get_tag = vhost_scsi_get_tpgt,
2936 .tpg_check_demo_mode = vhost_scsi_check_true,
2937 .tpg_check_demo_mode_cache = vhost_scsi_check_true,
2938 .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
2939 .release_cmd = vhost_scsi_release_cmd,
2940 .check_stop_free = vhost_scsi_check_stop_free,
2941 .sess_get_initiator_sid = NULL,
2942 .write_pending = vhost_scsi_write_pending,
2943 .queue_data_in = vhost_scsi_queue_data_in,
2944 .queue_status = vhost_scsi_queue_status,
2945 .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
2946 .aborted_task = vhost_scsi_aborted_task,
2947 /*
2948 * Setup callers for generic logic in target_core_fabric_configfs.c
2949 */
2950 .fabric_make_wwn = vhost_scsi_make_tport,
2951 .fabric_drop_wwn = vhost_scsi_drop_tport,
2952 .fabric_make_tpg = vhost_scsi_make_tpg,
2953 .fabric_drop_tpg = vhost_scsi_drop_tpg,
2954 .fabric_post_link = vhost_scsi_port_link,
2955 .fabric_pre_unlink = vhost_scsi_port_unlink,
2956
2957 .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
2958 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
2959 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
2960
2961 .default_submit_type = TARGET_QUEUE_SUBMIT,
2962 .direct_submit_supp = 1,
2963 };
2964
2965 static int __init vhost_scsi_init(void)
2966 {
2967 int ret = -ENOMEM;
2968
2969 pr_debug("TCM_VHOST fabric module %s on %s/%s"
2970 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2971 utsname()->machine);
2972
2973 ret = vhost_scsi_register();
2974 if (ret < 0)
2975 goto out;
2976
2977 ret = target_register_template(&vhost_scsi_ops);
2978 if (ret < 0)
2979 goto out_vhost_scsi_deregister;
2980
2981 return 0;
2982
2983 out_vhost_scsi_deregister:
2984 vhost_scsi_deregister();
2985 out:
2986 return ret;
2987 }
2988
2989 static void vhost_scsi_exit(void)
2990 {
2991 target_unregister_template(&vhost_scsi_ops);
2992 vhost_scsi_deregister();
2993 }
2994
2995 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2996 MODULE_ALIAS("tcm_vhost");
2997 MODULE_LICENSE("GPL");
2998 module_init(vhost_scsi_init);
2999 module_exit(vhost_scsi_exit);
3000