1 /******************************************************************************* 2 * Vhost kernel TCM fabric driver for virtio SCSI initiators 3 * 4 * (C) Copyright 2010-2013 Datera, Inc. 5 * (C) Copyright 2010-2012 IBM Corp. 6 * 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 8 * 9 * Authors: Nicholas A. Bellinger <nab@daterainc.com> 10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 ****************************************************************************/ 23 24 #include <linux/module.h> 25 #include <linux/moduleparam.h> 26 #include <generated/utsrelease.h> 27 #include <linux/utsname.h> 28 #include <linux/init.h> 29 #include <linux/slab.h> 30 #include <linux/kthread.h> 31 #include <linux/types.h> 32 #include <linux/string.h> 33 #include <linux/configfs.h> 34 #include <linux/ctype.h> 35 #include <linux/compat.h> 36 #include <linux/eventfd.h> 37 #include <linux/fs.h> 38 #include <linux/vmalloc.h> 39 #include <linux/miscdevice.h> 40 #include <asm/unaligned.h> 41 #include <scsi/scsi.h> 42 #include <target/target_core_base.h> 43 #include <target/target_core_fabric.h> 44 #include <target/target_core_fabric_configfs.h> 45 #include <target/target_core_configfs.h> 46 #include <target/configfs_macros.h> 47 #include <linux/vhost.h> 48 #include <linux/virtio_scsi.h> 49 #include <linux/llist.h> 50 #include <linux/bitmap.h> 51 #include <linux/percpu_ida.h> 52 53 #include "vhost.h" 54 55 #define VHOST_SCSI_VERSION "v0.1" 56 #define VHOST_SCSI_NAMELEN 256 57 #define VHOST_SCSI_MAX_CDB_SIZE 32 58 #define VHOST_SCSI_DEFAULT_TAGS 256 59 #define VHOST_SCSI_PREALLOC_SGLS 2048 60 #define VHOST_SCSI_PREALLOC_UPAGES 2048 61 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512 62 63 struct vhost_scsi_inflight { 64 /* Wait for the flush operation to finish */ 65 struct completion comp; 66 /* Refcount for the inflight reqs */ 67 struct kref kref; 68 }; 69 70 struct vhost_scsi_cmd { 71 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ 72 int tvc_vq_desc; 73 /* virtio-scsi initiator task attribute */ 74 int tvc_task_attr; 75 /* virtio-scsi response incoming iovecs */ 76 int tvc_in_iovs; 77 /* virtio-scsi initiator data direction */ 78 enum dma_data_direction tvc_data_direction; 79 /* Expected data transfer length from virtio-scsi header */ 80 u32 tvc_exp_data_len; 81 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ 82 u64 tvc_tag; 83 /* The number of scatterlists associated with this cmd */ 84 u32 tvc_sgl_count; 85 u32 tvc_prot_sgl_count; 86 /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */ 87 u32 tvc_lun; 88 /* Pointer to the SGL formatted memory from virtio-scsi */ 89 struct scatterlist *tvc_sgl; 90 struct scatterlist *tvc_prot_sgl; 91 struct page **tvc_upages; 92 /* Pointer to response header iovec */ 93 struct iovec *tvc_resp_iov; 94 /* Pointer to vhost_scsi for our device */ 95 struct vhost_scsi *tvc_vhost; 96 /* Pointer to vhost_virtqueue for the cmd */ 97 struct vhost_virtqueue *tvc_vq; 98 /* Pointer to 
vhost nexus memory */ 99 struct vhost_scsi_nexus *tvc_nexus; 100 /* The TCM I/O descriptor that is accessed via container_of() */ 101 struct se_cmd tvc_se_cmd; 102 /* work item used for cmwq dispatch to vhost_scsi_submission_work() */ 103 struct work_struct work; 104 /* Copy of the incoming SCSI command descriptor block (CDB) */ 105 unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; 106 /* Sense buffer that will be mapped into outgoing status */ 107 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; 108 /* Completed commands list, serviced from vhost worker thread */ 109 struct llist_node tvc_completion_list; 110 /* Used to track inflight cmd */ 111 struct vhost_scsi_inflight *inflight; 112 }; 113 114 struct vhost_scsi_nexus { 115 /* Pointer to TCM session for I_T Nexus */ 116 struct se_session *tvn_se_sess; 117 }; 118 119 struct vhost_scsi_nacl { 120 /* Binary World Wide unique Port Name for Vhost Initiator port */ 121 u64 iport_wwpn; 122 /* ASCII formatted WWPN for Sas Initiator port */ 123 char iport_name[VHOST_SCSI_NAMELEN]; 124 /* Returned by vhost_scsi_make_nodeacl() */ 125 struct se_node_acl se_node_acl; 126 }; 127 128 struct vhost_scsi_tpg { 129 /* Vhost port target portal group tag for TCM */ 130 u16 tport_tpgt; 131 /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ 132 int tv_tpg_port_count; 133 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ 134 int tv_tpg_vhost_count; 135 /* Used for enabling T10-PI with legacy devices */ 136 int tv_fabric_prot_type; 137 /* list for vhost_scsi_list */ 138 struct list_head tv_tpg_list; 139 /* Used to protect access for tpg_nexus */ 140 struct mutex tv_tpg_mutex; 141 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ 142 struct vhost_scsi_nexus *tpg_nexus; 143 /* Pointer back to vhost_scsi_tport */ 144 struct vhost_scsi_tport *tport; 145 /* Returned by vhost_scsi_make_tpg() */ 146 struct se_portal_group se_tpg; 147 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ 148 struct vhost_scsi *vhost_scsi; 149 }; 150 151 struct vhost_scsi_tport { 152 /* SCSI protocol the tport is providing */ 153 u8 tport_proto_id; 154 /* Binary World Wide unique Port Name for Vhost Target port */ 155 u64 tport_wwpn; 156 /* ASCII formatted WWPN for Vhost Target port */ 157 char tport_name[VHOST_SCSI_NAMELEN]; 158 /* Returned by vhost_scsi_make_tport() */ 159 struct se_wwn tport_wwn; 160 }; 161 162 struct vhost_scsi_evt { 163 /* event to be sent to guest */ 164 struct virtio_scsi_event event; 165 /* event list, serviced from vhost worker thread */ 166 struct llist_node list; 167 }; 168 169 enum { 170 VHOST_SCSI_VQ_CTL = 0, 171 VHOST_SCSI_VQ_EVT = 1, 172 VHOST_SCSI_VQ_IO = 2, 173 }; 174 175 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ 176 enum { 177 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | 178 (1ULL << VIRTIO_SCSI_F_T10_PI) | 179 (1ULL << VIRTIO_F_ANY_LAYOUT) | 180 (1ULL << VIRTIO_F_VERSION_1) 181 }; 182 183 #define VHOST_SCSI_MAX_TARGET 256 184 #define VHOST_SCSI_MAX_VQ 128 185 #define VHOST_SCSI_MAX_EVENT 128 186 187 struct vhost_scsi_virtqueue { 188 struct vhost_virtqueue vq; 189 /* 190 * Reference counting for inflight reqs, used for flush operation. At 191 * each time, one reference tracks new commands submitted, while we 192 * wait for another one to reach 0. 193 */ 194 struct vhost_scsi_inflight inflights[2]; 195 /* 196 * Indicate current inflight in use, protected by vq->mutex. 
197 * Writers must also take dev mutex and flush under it. 198 */ 199 int inflight_idx; 200 }; 201 202 struct vhost_scsi { 203 /* Protected by vhost_scsi->dev.mutex */ 204 struct vhost_scsi_tpg **vs_tpg; 205 char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; 206 207 struct vhost_dev dev; 208 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; 209 210 struct vhost_work vs_completion_work; /* cmd completion work item */ 211 struct llist_head vs_completion_list; /* cmd completion queue */ 212 213 struct vhost_work vs_event_work; /* evt injection work item */ 214 struct llist_head vs_event_list; /* evt injection queue */ 215 216 bool vs_events_missed; /* any missed events, protected by vq->mutex */ 217 int vs_events_nr; /* num of pending events, protected by vq->mutex */ 218 }; 219 220 static struct target_core_fabric_ops vhost_scsi_ops; 221 static struct workqueue_struct *vhost_scsi_workqueue; 222 223 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ 224 static DEFINE_MUTEX(vhost_scsi_mutex); 225 static LIST_HEAD(vhost_scsi_list); 226 227 static int iov_num_pages(void __user *iov_base, size_t iov_len) 228 { 229 return (PAGE_ALIGN((unsigned long)iov_base + iov_len) - 230 ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT; 231 } 232 233 static void vhost_scsi_done_inflight(struct kref *kref) 234 { 235 struct vhost_scsi_inflight *inflight; 236 237 inflight = container_of(kref, struct vhost_scsi_inflight, kref); 238 complete(&inflight->comp); 239 } 240 241 static void vhost_scsi_init_inflight(struct vhost_scsi *vs, 242 struct vhost_scsi_inflight *old_inflight[]) 243 { 244 struct vhost_scsi_inflight *new_inflight; 245 struct vhost_virtqueue *vq; 246 int idx, i; 247 248 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 249 vq = &vs->vqs[i].vq; 250 251 mutex_lock(&vq->mutex); 252 253 /* store old infight */ 254 idx = vs->vqs[i].inflight_idx; 255 if (old_inflight) 256 old_inflight[i] = &vs->vqs[i].inflights[idx]; 257 258 /* setup new infight */ 259 vs->vqs[i].inflight_idx = idx ^ 1; 260 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; 261 kref_init(&new_inflight->kref); 262 init_completion(&new_inflight->comp); 263 264 mutex_unlock(&vq->mutex); 265 } 266 } 267 268 static struct vhost_scsi_inflight * 269 vhost_scsi_get_inflight(struct vhost_virtqueue *vq) 270 { 271 struct vhost_scsi_inflight *inflight; 272 struct vhost_scsi_virtqueue *svq; 273 274 svq = container_of(vq, struct vhost_scsi_virtqueue, vq); 275 inflight = &svq->inflights[svq->inflight_idx]; 276 kref_get(&inflight->kref); 277 278 return inflight; 279 } 280 281 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight) 282 { 283 kref_put(&inflight->kref, vhost_scsi_done_inflight); 284 } 285 286 static int vhost_scsi_check_true(struct se_portal_group *se_tpg) 287 { 288 return 1; 289 } 290 291 static int vhost_scsi_check_false(struct se_portal_group *se_tpg) 292 { 293 return 0; 294 } 295 296 static char *vhost_scsi_get_fabric_name(void) 297 { 298 return "vhost"; 299 } 300 301 static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) 302 { 303 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 304 struct vhost_scsi_tpg, se_tpg); 305 struct vhost_scsi_tport *tport = tpg->tport; 306 307 switch (tport->tport_proto_id) { 308 case SCSI_PROTOCOL_SAS: 309 return sas_get_fabric_proto_ident(se_tpg); 310 case SCSI_PROTOCOL_FCP: 311 return fc_get_fabric_proto_ident(se_tpg); 312 case SCSI_PROTOCOL_ISCSI: 313 return iscsi_get_fabric_proto_ident(se_tpg); 314 default: 315 pr_err("Unknown tport_proto_id: 0x%02x, using" 
316 " SAS emulation\n", tport->tport_proto_id); 317 break; 318 } 319 320 return sas_get_fabric_proto_ident(se_tpg); 321 } 322 323 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) 324 { 325 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 326 struct vhost_scsi_tpg, se_tpg); 327 struct vhost_scsi_tport *tport = tpg->tport; 328 329 return &tport->tport_name[0]; 330 } 331 332 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg) 333 { 334 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 335 struct vhost_scsi_tpg, se_tpg); 336 return tpg->tport_tpgt; 337 } 338 339 static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg) 340 { 341 return 1; 342 } 343 344 static u32 345 vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg, 346 struct se_node_acl *se_nacl, 347 struct t10_pr_registration *pr_reg, 348 int *format_code, 349 unsigned char *buf) 350 { 351 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 352 struct vhost_scsi_tpg, se_tpg); 353 struct vhost_scsi_tport *tport = tpg->tport; 354 355 switch (tport->tport_proto_id) { 356 case SCSI_PROTOCOL_SAS: 357 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 358 format_code, buf); 359 case SCSI_PROTOCOL_FCP: 360 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 361 format_code, buf); 362 case SCSI_PROTOCOL_ISCSI: 363 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 364 format_code, buf); 365 default: 366 pr_err("Unknown tport_proto_id: 0x%02x, using" 367 " SAS emulation\n", tport->tport_proto_id); 368 break; 369 } 370 371 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 372 format_code, buf); 373 } 374 375 static u32 376 vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg, 377 struct se_node_acl *se_nacl, 378 struct t10_pr_registration *pr_reg, 379 int *format_code) 380 { 381 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 382 struct vhost_scsi_tpg, se_tpg); 383 struct vhost_scsi_tport *tport = tpg->tport; 384 385 switch (tport->tport_proto_id) { 386 case SCSI_PROTOCOL_SAS: 387 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 388 format_code); 389 case SCSI_PROTOCOL_FCP: 390 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 391 format_code); 392 case SCSI_PROTOCOL_ISCSI: 393 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 394 format_code); 395 default: 396 pr_err("Unknown tport_proto_id: 0x%02x, using" 397 " SAS emulation\n", tport->tport_proto_id); 398 break; 399 } 400 401 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 402 format_code); 403 } 404 405 static char * 406 vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg, 407 const char *buf, 408 u32 *out_tid_len, 409 char **port_nexus_ptr) 410 { 411 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 412 struct vhost_scsi_tpg, se_tpg); 413 struct vhost_scsi_tport *tport = tpg->tport; 414 415 switch (tport->tport_proto_id) { 416 case SCSI_PROTOCOL_SAS: 417 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 418 port_nexus_ptr); 419 case SCSI_PROTOCOL_FCP: 420 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 421 port_nexus_ptr); 422 case SCSI_PROTOCOL_ISCSI: 423 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 424 port_nexus_ptr); 425 default: 426 pr_err("Unknown tport_proto_id: 0x%02x, using" 427 " SAS emulation\n", tport->tport_proto_id); 428 break; 429 } 430 431 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 432 port_nexus_ptr); 433 } 434 435 static int 
vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg) 436 { 437 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 438 struct vhost_scsi_tpg, se_tpg); 439 440 return tpg->tv_fabric_prot_type; 441 } 442 443 static struct se_node_acl * 444 vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) 445 { 446 struct vhost_scsi_nacl *nacl; 447 448 nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL); 449 if (!nacl) { 450 pr_err("Unable to allocate struct vhost_scsi_nacl\n"); 451 return NULL; 452 } 453 454 return &nacl->se_node_acl; 455 } 456 457 static void 458 vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg, 459 struct se_node_acl *se_nacl) 460 { 461 struct vhost_scsi_nacl *nacl = container_of(se_nacl, 462 struct vhost_scsi_nacl, se_node_acl); 463 kfree(nacl); 464 } 465 466 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) 467 { 468 return 1; 469 } 470 471 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) 472 { 473 struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, 474 struct vhost_scsi_cmd, tvc_se_cmd); 475 struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess; 476 int i; 477 478 if (tv_cmd->tvc_sgl_count) { 479 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 480 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 481 } 482 if (tv_cmd->tvc_prot_sgl_count) { 483 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++) 484 put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); 485 } 486 487 vhost_scsi_put_inflight(tv_cmd->inflight); 488 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 489 } 490 491 static int vhost_scsi_shutdown_session(struct se_session *se_sess) 492 { 493 return 0; 494 } 495 496 static void vhost_scsi_close_session(struct se_session *se_sess) 497 { 498 return; 499 } 500 501 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) 502 { 503 return 0; 504 } 505 506 static int vhost_scsi_write_pending(struct se_cmd *se_cmd) 507 { 508 /* Go ahead and process the write immediately */ 509 target_execute_cmd(se_cmd); 510 return 0; 511 } 512 513 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd) 514 { 515 return 0; 516 } 517 518 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) 519 { 520 return; 521 } 522 523 static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd) 524 { 525 return 0; 526 } 527 528 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) 529 { 530 return 0; 531 } 532 533 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd) 534 { 535 struct vhost_scsi *vs = cmd->tvc_vhost; 536 537 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list); 538 539 vhost_work_queue(&vs->dev, &vs->vs_completion_work); 540 } 541 542 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) 543 { 544 struct vhost_scsi_cmd *cmd = container_of(se_cmd, 545 struct vhost_scsi_cmd, tvc_se_cmd); 546 vhost_scsi_complete_cmd(cmd); 547 return 0; 548 } 549 550 static int vhost_scsi_queue_status(struct se_cmd *se_cmd) 551 { 552 struct vhost_scsi_cmd *cmd = container_of(se_cmd, 553 struct vhost_scsi_cmd, tvc_se_cmd); 554 vhost_scsi_complete_cmd(cmd); 555 return 0; 556 } 557 558 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd) 559 { 560 return; 561 } 562 563 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd) 564 { 565 return; 566 } 567 568 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) 569 { 570 vs->vs_events_nr--; 571 kfree(evt); 572 } 573 574 static struct vhost_scsi_evt * 575 vhost_scsi_allocate_evt(struct vhost_scsi *vs, 576 u32 event, 
u32 reason) 577 { 578 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 579 struct vhost_scsi_evt *evt; 580 581 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { 582 vs->vs_events_missed = true; 583 return NULL; 584 } 585 586 evt = kzalloc(sizeof(*evt), GFP_KERNEL); 587 if (!evt) { 588 vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); 589 vs->vs_events_missed = true; 590 return NULL; 591 } 592 593 evt->event.event = cpu_to_vhost32(vq, event); 594 evt->event.reason = cpu_to_vhost32(vq, reason); 595 vs->vs_events_nr++; 596 597 return evt; 598 } 599 600 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd) 601 { 602 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 603 604 /* TODO locking against target/backend threads? */ 605 transport_generic_free_cmd(se_cmd, 0); 606 607 } 608 609 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) 610 { 611 return target_put_sess_cmd(se_cmd->se_sess, se_cmd); 612 } 613 614 static void 615 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) 616 { 617 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 618 struct virtio_scsi_event *event = &evt->event; 619 struct virtio_scsi_event __user *eventp; 620 unsigned out, in; 621 int head, ret; 622 623 if (!vq->private_data) { 624 vs->vs_events_missed = true; 625 return; 626 } 627 628 again: 629 vhost_disable_notify(&vs->dev, vq); 630 head = vhost_get_vq_desc(vq, vq->iov, 631 ARRAY_SIZE(vq->iov), &out, &in, 632 NULL, NULL); 633 if (head < 0) { 634 vs->vs_events_missed = true; 635 return; 636 } 637 if (head == vq->num) { 638 if (vhost_enable_notify(&vs->dev, vq)) 639 goto again; 640 vs->vs_events_missed = true; 641 return; 642 } 643 644 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) { 645 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n", 646 vq->iov[out].iov_len); 647 vs->vs_events_missed = true; 648 return; 649 } 650 651 if (vs->vs_events_missed) { 652 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED); 653 vs->vs_events_missed = false; 654 } 655 656 eventp = vq->iov[out].iov_base; 657 ret = __copy_to_user(eventp, event, sizeof(*event)); 658 if (!ret) 659 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 660 else 661 vq_err(vq, "Faulted on vhost_scsi_send_event\n"); 662 } 663 664 static void vhost_scsi_evt_work(struct vhost_work *work) 665 { 666 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 667 vs_event_work); 668 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 669 struct vhost_scsi_evt *evt; 670 struct llist_node *llnode; 671 672 mutex_lock(&vq->mutex); 673 llnode = llist_del_all(&vs->vs_event_list); 674 while (llnode) { 675 evt = llist_entry(llnode, struct vhost_scsi_evt, list); 676 llnode = llist_next(llnode); 677 vhost_scsi_do_evt_work(vs, evt); 678 vhost_scsi_free_evt(vs, evt); 679 } 680 mutex_unlock(&vq->mutex); 681 } 682 683 /* Fill in status and signal that we are done processing this command 684 * 685 * This is scheduled in the vhost work queue so we are called with the owner 686 * process mm and can access the vring. 
687 */ 688 static void vhost_scsi_complete_cmd_work(struct vhost_work *work) 689 { 690 struct vhost_scsi *vs = container_of(work, struct vhost_scsi, 691 vs_completion_work); 692 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); 693 struct virtio_scsi_cmd_resp v_rsp; 694 struct vhost_scsi_cmd *cmd; 695 struct llist_node *llnode; 696 struct se_cmd *se_cmd; 697 struct iov_iter iov_iter; 698 int ret, vq; 699 700 bitmap_zero(signal, VHOST_SCSI_MAX_VQ); 701 llnode = llist_del_all(&vs->vs_completion_list); 702 while (llnode) { 703 cmd = llist_entry(llnode, struct vhost_scsi_cmd, 704 tvc_completion_list); 705 llnode = llist_next(llnode); 706 se_cmd = &cmd->tvc_se_cmd; 707 708 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, 709 cmd, se_cmd->residual_count, se_cmd->scsi_status); 710 711 memset(&v_rsp, 0, sizeof(v_rsp)); 712 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count); 713 /* TODO is status_qualifier field needed? */ 714 v_rsp.status = se_cmd->scsi_status; 715 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq, 716 se_cmd->scsi_sense_length); 717 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 718 se_cmd->scsi_sense_length); 719 720 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, 721 cmd->tvc_in_iovs, sizeof(v_rsp)); 722 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); 723 if (likely(ret == sizeof(v_rsp))) { 724 struct vhost_scsi_virtqueue *q; 725 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); 726 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); 727 vq = q - vs->vqs; 728 __set_bit(vq, signal); 729 } else 730 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 731 732 vhost_scsi_free_cmd(cmd); 733 } 734 735 vq = -1; 736 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1)) 737 < VHOST_SCSI_MAX_VQ) 738 vhost_signal(&vs->dev, &vs->vqs[vq].vq); 739 } 740 741 static struct vhost_scsi_cmd * 742 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, 743 unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, 744 u32 exp_data_len, int data_direction) 745 { 746 struct vhost_scsi_cmd *cmd; 747 struct vhost_scsi_nexus *tv_nexus; 748 struct se_session *se_sess; 749 struct scatterlist *sg, *prot_sg; 750 struct page **pages; 751 int tag; 752 753 tv_nexus = tpg->tpg_nexus; 754 if (!tv_nexus) { 755 pr_err("Unable to locate active struct vhost_scsi_nexus\n"); 756 return ERR_PTR(-EIO); 757 } 758 se_sess = tv_nexus->tvn_se_sess; 759 760 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 761 if (tag < 0) { 762 pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); 763 return ERR_PTR(-ENOMEM); 764 } 765 766 cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag]; 767 sg = cmd->tvc_sgl; 768 prot_sg = cmd->tvc_prot_sgl; 769 pages = cmd->tvc_upages; 770 memset(cmd, 0, sizeof(struct vhost_scsi_cmd)); 771 772 cmd->tvc_sgl = sg; 773 cmd->tvc_prot_sgl = prot_sg; 774 cmd->tvc_upages = pages; 775 cmd->tvc_se_cmd.map_tag = tag; 776 cmd->tvc_tag = scsi_tag; 777 cmd->tvc_lun = lun; 778 cmd->tvc_task_attr = task_attr; 779 cmd->tvc_exp_data_len = exp_data_len; 780 cmd->tvc_data_direction = data_direction; 781 cmd->tvc_nexus = tv_nexus; 782 cmd->inflight = vhost_scsi_get_inflight(vq); 783 784 memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE); 785 786 return cmd; 787 } 788 789 /* 790 * Map a user memory range into a scatterlist 791 * 792 * Returns the number of scatterlist entries used or -errno on error. 
793 */ 794 static int 795 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, 796 void __user *ptr, 797 size_t len, 798 struct scatterlist *sgl, 799 bool write) 800 { 801 unsigned int npages = 0, offset, nbytes; 802 unsigned int pages_nr = iov_num_pages(ptr, len); 803 struct scatterlist *sg = sgl; 804 struct page **pages = cmd->tvc_upages; 805 int ret, i; 806 807 if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) { 808 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" 809 " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n", 810 pages_nr, VHOST_SCSI_PREALLOC_UPAGES); 811 return -ENOBUFS; 812 } 813 814 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages); 815 /* No pages were pinned */ 816 if (ret < 0) 817 goto out; 818 /* Less pages pinned than wanted */ 819 if (ret != pages_nr) { 820 for (i = 0; i < ret; i++) 821 put_page(pages[i]); 822 ret = -EFAULT; 823 goto out; 824 } 825 826 while (len > 0) { 827 offset = (uintptr_t)ptr & ~PAGE_MASK; 828 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len); 829 sg_set_page(sg, pages[npages], nbytes, offset); 830 ptr += nbytes; 831 len -= nbytes; 832 sg++; 833 npages++; 834 } 835 836 out: 837 return ret; 838 } 839 840 static int 841 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) 842 { 843 int sgl_count = 0; 844 845 if (!iter || !iter->iov) { 846 pr_err("%s: iter->iov is NULL, but expected bytes: %zu" 847 " present\n", __func__, bytes); 848 return -EINVAL; 849 } 850 851 sgl_count = iov_iter_npages(iter, 0xffff); 852 if (sgl_count > max_sgls) { 853 pr_err("%s: requested sgl_count: %d exceeds pre-allocated" 854 " max_sgls: %d\n", __func__, sgl_count, max_sgls); 855 return -EINVAL; 856 } 857 return sgl_count; 858 } 859 860 static int 861 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, 862 struct iov_iter *iter, 863 struct scatterlist *sg, int sg_count) 864 { 865 size_t off = iter->iov_offset; 866 int i, ret; 867 868 for (i = 0; i < iter->nr_segs; i++) { 869 void __user *base = iter->iov[i].iov_base + off; 870 size_t len = iter->iov[i].iov_len - off; 871 872 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); 873 if (ret < 0) { 874 for (i = 0; i < sg_count; i++) { 875 struct page *page = sg_page(&sg[i]); 876 if (page) 877 put_page(page); 878 } 879 return ret; 880 } 881 sg += ret; 882 off = 0; 883 } 884 return 0; 885 } 886 887 static int 888 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, 889 size_t prot_bytes, struct iov_iter *prot_iter, 890 size_t data_bytes, struct iov_iter *data_iter) 891 { 892 int sgl_count, ret; 893 bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); 894 895 if (prot_bytes) { 896 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, 897 VHOST_SCSI_PREALLOC_PROT_SGLS); 898 if (sgl_count < 0) 899 return sgl_count; 900 901 sg_init_table(cmd->tvc_prot_sgl, sgl_count); 902 cmd->tvc_prot_sgl_count = sgl_count; 903 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, 904 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); 905 906 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, 907 cmd->tvc_prot_sgl, 908 cmd->tvc_prot_sgl_count); 909 if (ret < 0) { 910 cmd->tvc_prot_sgl_count = 0; 911 return ret; 912 } 913 } 914 sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes, 915 VHOST_SCSI_PREALLOC_SGLS); 916 if (sgl_count < 0) 917 return sgl_count; 918 919 sg_init_table(cmd->tvc_sgl, sgl_count); 920 cmd->tvc_sgl_count = sgl_count; 921 pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, 922 cmd->tvc_sgl, cmd->tvc_sgl_count); 923 924 ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, 925 
cmd->tvc_sgl, cmd->tvc_sgl_count); 926 if (ret < 0) { 927 cmd->tvc_sgl_count = 0; 928 return ret; 929 } 930 return 0; 931 } 932 933 static int vhost_scsi_to_tcm_attr(int attr) 934 { 935 switch (attr) { 936 case VIRTIO_SCSI_S_SIMPLE: 937 return TCM_SIMPLE_TAG; 938 case VIRTIO_SCSI_S_ORDERED: 939 return TCM_ORDERED_TAG; 940 case VIRTIO_SCSI_S_HEAD: 941 return TCM_HEAD_TAG; 942 case VIRTIO_SCSI_S_ACA: 943 return TCM_ACA_TAG; 944 default: 945 break; 946 } 947 return TCM_SIMPLE_TAG; 948 } 949 950 static void vhost_scsi_submission_work(struct work_struct *work) 951 { 952 struct vhost_scsi_cmd *cmd = 953 container_of(work, struct vhost_scsi_cmd, work); 954 struct vhost_scsi_nexus *tv_nexus; 955 struct se_cmd *se_cmd = &cmd->tvc_se_cmd; 956 struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; 957 int rc; 958 959 /* FIXME: BIDI operation */ 960 if (cmd->tvc_sgl_count) { 961 sg_ptr = cmd->tvc_sgl; 962 963 if (cmd->tvc_prot_sgl_count) 964 sg_prot_ptr = cmd->tvc_prot_sgl; 965 else 966 se_cmd->prot_pto = true; 967 } else { 968 sg_ptr = NULL; 969 } 970 tv_nexus = cmd->tvc_nexus; 971 972 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, 973 cmd->tvc_cdb, &cmd->tvc_sense_buf[0], 974 cmd->tvc_lun, cmd->tvc_exp_data_len, 975 vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), 976 cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, 977 sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, 978 cmd->tvc_prot_sgl_count); 979 if (rc < 0) { 980 transport_send_check_condition_and_sense(se_cmd, 981 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 982 transport_generic_free_cmd(se_cmd, 0); 983 } 984 } 985 986 static void 987 vhost_scsi_send_bad_target(struct vhost_scsi *vs, 988 struct vhost_virtqueue *vq, 989 int head, unsigned out) 990 { 991 struct virtio_scsi_cmd_resp __user *resp; 992 struct virtio_scsi_cmd_resp rsp; 993 int ret; 994 995 memset(&rsp, 0, sizeof(rsp)); 996 rsp.response = VIRTIO_SCSI_S_BAD_TARGET; 997 resp = vq->iov[out].iov_base; 998 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 999 if (!ret) 1000 vhost_add_used_and_signal(&vs->dev, vq, head, 0); 1001 else 1002 pr_err("Faulted on virtio_scsi_cmd_resp\n"); 1003 } 1004 1005 static void 1006 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) 1007 { 1008 struct vhost_scsi_tpg **vs_tpg, *tpg; 1009 struct virtio_scsi_cmd_req v_req; 1010 struct virtio_scsi_cmd_req_pi v_req_pi; 1011 struct vhost_scsi_cmd *cmd; 1012 struct iov_iter out_iter, in_iter, prot_iter, data_iter; 1013 u64 tag; 1014 u32 exp_data_len, data_direction; 1015 unsigned out, in; 1016 int head, ret, prot_bytes; 1017 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); 1018 size_t out_size, in_size; 1019 u16 lun; 1020 u8 *target, *lunp, task_attr; 1021 bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); 1022 void *req, *cdb; 1023 1024 mutex_lock(&vq->mutex); 1025 /* 1026 * We can handle the vq only after the endpoint is setup by calling the 1027 * VHOST_SCSI_SET_ENDPOINT ioctl. 1028 */ 1029 vs_tpg = vq->private_data; 1030 if (!vs_tpg) 1031 goto out; 1032 1033 vhost_disable_notify(&vs->dev, vq); 1034 1035 for (;;) { 1036 head = vhost_get_vq_desc(vq, vq->iov, 1037 ARRAY_SIZE(vq->iov), &out, &in, 1038 NULL, NULL); 1039 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", 1040 head, out, in); 1041 /* On error, stop handling until the next kick. */ 1042 if (unlikely(head < 0)) 1043 break; 1044 /* Nothing new? Wait for eventfd to tell us they refilled. 
*/ 1045 if (head == vq->num) { 1046 if (unlikely(vhost_enable_notify(&vs->dev, vq))) { 1047 vhost_disable_notify(&vs->dev, vq); 1048 continue; 1049 } 1050 break; 1051 } 1052 /* 1053 * Check for a sane response buffer so we can report early 1054 * errors back to the guest. 1055 */ 1056 if (unlikely(vq->iov[out].iov_len < rsp_size)) { 1057 vq_err(vq, "Expecting at least virtio_scsi_cmd_resp" 1058 " size, got %zu bytes\n", vq->iov[out].iov_len); 1059 break; 1060 } 1061 /* 1062 * Setup pointers and values based upon different virtio-scsi 1063 * request header if T10_PI is enabled in KVM guest. 1064 */ 1065 if (t10_pi) { 1066 req = &v_req_pi; 1067 req_size = sizeof(v_req_pi); 1068 lunp = &v_req_pi.lun[0]; 1069 target = &v_req_pi.lun[1]; 1070 } else { 1071 req = &v_req; 1072 req_size = sizeof(v_req); 1073 lunp = &v_req.lun[0]; 1074 target = &v_req.lun[1]; 1075 } 1076 /* 1077 * FIXME: Not correct for BIDI operation 1078 */ 1079 out_size = iov_length(vq->iov, out); 1080 in_size = iov_length(&vq->iov[out], in); 1081 1082 /* 1083 * Copy over the virtio-scsi request header, which for a 1084 * ANY_LAYOUT enabled guest may span multiple iovecs, or a 1085 * single iovec may contain both the header + outgoing 1086 * WRITE payloads. 1087 * 1088 * copy_from_iter() will advance out_iter, so that it will 1089 * point at the start of the outgoing WRITE payload, if 1090 * DMA_TO_DEVICE is set. 1091 */ 1092 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); 1093 1094 ret = copy_from_iter(req, req_size, &out_iter); 1095 if (unlikely(ret != req_size)) { 1096 vq_err(vq, "Faulted on copy_from_iter\n"); 1097 vhost_scsi_send_bad_target(vs, vq, head, out); 1098 continue; 1099 } 1100 /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1101 if (unlikely(*lunp != 1)) { 1102 vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); 1103 vhost_scsi_send_bad_target(vs, vq, head, out); 1104 continue; 1105 } 1106 1107 tpg = ACCESS_ONCE(vs_tpg[*target]); 1108 if (unlikely(!tpg)) { 1109 /* Target does not exist, fail the request */ 1110 vhost_scsi_send_bad_target(vs, vq, head, out); 1111 continue; 1112 } 1113 /* 1114 * Determine data_direction by calculating the total outgoing 1115 * iovec sizes + incoming iovec sizes vs. virtio-scsi request + 1116 * response headers respectively. 1117 * 1118 * For DMA_TO_DEVICE this is out_iter, which is already pointing 1119 * to the right place. 1120 * 1121 * For DMA_FROM_DEVICE, the iovec will be just past the end 1122 * of the virtio-scsi response header in either the same 1123 * or immediately following iovec. 1124 * 1125 * Any associated T10_PI bytes for the outgoing / incoming 1126 * payloads are included in calculation of exp_data_len here. 1127 */ 1128 prot_bytes = 0; 1129 1130 if (out_size > req_size) { 1131 data_direction = DMA_TO_DEVICE; 1132 exp_data_len = out_size - req_size; 1133 data_iter = out_iter; 1134 } else if (in_size > rsp_size) { 1135 data_direction = DMA_FROM_DEVICE; 1136 exp_data_len = in_size - rsp_size; 1137 1138 iov_iter_init(&in_iter, READ, &vq->iov[out], in, 1139 rsp_size + exp_data_len); 1140 iov_iter_advance(&in_iter, rsp_size); 1141 data_iter = in_iter; 1142 } else { 1143 data_direction = DMA_NONE; 1144 exp_data_len = 0; 1145 } 1146 /* 1147 * If T10_PI header + payload is present, setup prot_iter values 1148 * and recalculate data_iter for vhost_scsi_mapal() mapping to 1149 * host scatterlists via get_user_pages_fast(). 
1150 */ 1151 if (t10_pi) { 1152 if (v_req_pi.pi_bytesout) { 1153 if (data_direction != DMA_TO_DEVICE) { 1154 vq_err(vq, "Received non zero pi_bytesout," 1155 " but wrong data_direction\n"); 1156 vhost_scsi_send_bad_target(vs, vq, head, out); 1157 continue; 1158 } 1159 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); 1160 } else if (v_req_pi.pi_bytesin) { 1161 if (data_direction != DMA_FROM_DEVICE) { 1162 vq_err(vq, "Received non zero pi_bytesin," 1163 " but wrong data_direction\n"); 1164 vhost_scsi_send_bad_target(vs, vq, head, out); 1165 continue; 1166 } 1167 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); 1168 } 1169 /* 1170 * Set prot_iter to data_iter, and advance past any 1171 * preceeding prot_bytes that may be present. 1172 * 1173 * Also fix up the exp_data_len to reflect only the 1174 * actual data payload length. 1175 */ 1176 if (prot_bytes) { 1177 exp_data_len -= prot_bytes; 1178 prot_iter = data_iter; 1179 iov_iter_advance(&data_iter, prot_bytes); 1180 } 1181 tag = vhost64_to_cpu(vq, v_req_pi.tag); 1182 task_attr = v_req_pi.task_attr; 1183 cdb = &v_req_pi.cdb[0]; 1184 lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF; 1185 } else { 1186 tag = vhost64_to_cpu(vq, v_req.tag); 1187 task_attr = v_req.task_attr; 1188 cdb = &v_req.cdb[0]; 1189 lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; 1190 } 1191 /* 1192 * Check that the received CDB size does not exceeded our 1193 * hardcoded max for vhost-scsi, then get a pre-allocated 1194 * cmd descriptor for the new virtio-scsi tag. 1195 * 1196 * TODO what if cdb was too small for varlen cdb header? 1197 */ 1198 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) { 1199 vq_err(vq, "Received SCSI CDB with command_size: %d that" 1200 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1201 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); 1202 vhost_scsi_send_bad_target(vs, vq, head, out); 1203 continue; 1204 } 1205 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, 1206 exp_data_len + prot_bytes, 1207 data_direction); 1208 if (IS_ERR(cmd)) { 1209 vq_err(vq, "vhost_scsi_get_tag failed %ld\n", 1210 PTR_ERR(cmd)); 1211 vhost_scsi_send_bad_target(vs, vq, head, out); 1212 continue; 1213 } 1214 cmd->tvc_vhost = vs; 1215 cmd->tvc_vq = vq; 1216 cmd->tvc_resp_iov = &vq->iov[out]; 1217 cmd->tvc_in_iovs = in; 1218 1219 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", 1220 cmd->tvc_cdb[0], cmd->tvc_lun); 1221 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:" 1222 " %d\n", cmd, exp_data_len, prot_bytes, data_direction); 1223 1224 if (data_direction != DMA_NONE) { 1225 ret = vhost_scsi_mapal(cmd, 1226 prot_bytes, &prot_iter, 1227 exp_data_len, &data_iter); 1228 if (unlikely(ret)) { 1229 vq_err(vq, "Failed to map iov to sgl\n"); 1230 vhost_scsi_release_cmd(&cmd->tvc_se_cmd); 1231 vhost_scsi_send_bad_target(vs, vq, head, out); 1232 continue; 1233 } 1234 } 1235 /* 1236 * Save the descriptor from vhost_get_vq_desc() to be used to 1237 * complete the virtio-scsi request in TCM callback context via 1238 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() 1239 */ 1240 cmd->tvc_vq_desc = head; 1241 /* 1242 * Dispatch cmd descriptor for cmwq execution in process 1243 * context provided by vhost_scsi_workqueue. This also ensures 1244 * cmd is executed on the same kworker CPU as this vhost 1245 * thread to gain positive L2 cache locality effects. 
1246 */ 1247 INIT_WORK(&cmd->work, vhost_scsi_submission_work); 1248 queue_work(vhost_scsi_workqueue, &cmd->work); 1249 } 1250 out: 1251 mutex_unlock(&vq->mutex); 1252 } 1253 1254 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) 1255 { 1256 pr_debug("%s: The handling func for control queue.\n", __func__); 1257 } 1258 1259 static void 1260 vhost_scsi_send_evt(struct vhost_scsi *vs, 1261 struct vhost_scsi_tpg *tpg, 1262 struct se_lun *lun, 1263 u32 event, 1264 u32 reason) 1265 { 1266 struct vhost_scsi_evt *evt; 1267 1268 evt = vhost_scsi_allocate_evt(vs, event, reason); 1269 if (!evt) 1270 return; 1271 1272 if (tpg && lun) { 1273 /* TODO: share lun setup code with virtio-scsi.ko */ 1274 /* 1275 * Note: evt->event is zeroed when we allocate it and 1276 * lun[4-7] need to be zero according to virtio-scsi spec. 1277 */ 1278 evt->event.lun[0] = 0x01; 1279 evt->event.lun[1] = tpg->tport_tpgt; 1280 if (lun->unpacked_lun >= 256) 1281 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; 1282 evt->event.lun[3] = lun->unpacked_lun & 0xFF; 1283 } 1284 1285 llist_add(&evt->list, &vs->vs_event_list); 1286 vhost_work_queue(&vs->dev, &vs->vs_event_work); 1287 } 1288 1289 static void vhost_scsi_evt_handle_kick(struct vhost_work *work) 1290 { 1291 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 1292 poll.work); 1293 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 1294 1295 mutex_lock(&vq->mutex); 1296 if (!vq->private_data) 1297 goto out; 1298 1299 if (vs->vs_events_missed) 1300 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); 1301 out: 1302 mutex_unlock(&vq->mutex); 1303 } 1304 1305 static void vhost_scsi_handle_kick(struct vhost_work *work) 1306 { 1307 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 1308 poll.work); 1309 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); 1310 1311 vhost_scsi_handle_vq(vs, vq); 1312 } 1313 1314 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) 1315 { 1316 vhost_poll_flush(&vs->vqs[index].vq.poll); 1317 } 1318 1319 /* Callers must hold dev mutex */ 1320 static void vhost_scsi_flush(struct vhost_scsi *vs) 1321 { 1322 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ]; 1323 int i; 1324 1325 /* Init new inflight and remember the old inflight */ 1326 vhost_scsi_init_inflight(vs, old_inflight); 1327 1328 /* 1329 * The inflight->kref was initialized to 1. We decrement it here to 1330 * indicate the start of the flush operation so that it will reach 0 1331 * when all the reqs are finished. 
1332 */ 1333 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1334 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight); 1335 1336 /* Flush both the vhost poll and vhost work */ 1337 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1338 vhost_scsi_flush_vq(vs, i); 1339 vhost_work_flush(&vs->dev, &vs->vs_completion_work); 1340 vhost_work_flush(&vs->dev, &vs->vs_event_work); 1341 1342 /* Wait for all reqs issued before the flush to be finished */ 1343 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) 1344 wait_for_completion(&old_inflight[i]->comp); 1345 } 1346 1347 /* 1348 * Called from vhost_scsi_ioctl() context to walk the list of available 1349 * vhost_scsi_tpg with an active struct vhost_scsi_nexus 1350 * 1351 * The lock nesting rule is: 1352 * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex 1353 */ 1354 static int 1355 vhost_scsi_set_endpoint(struct vhost_scsi *vs, 1356 struct vhost_scsi_target *t) 1357 { 1358 struct se_portal_group *se_tpg; 1359 struct vhost_scsi_tport *tv_tport; 1360 struct vhost_scsi_tpg *tpg; 1361 struct vhost_scsi_tpg **vs_tpg; 1362 struct vhost_virtqueue *vq; 1363 int index, ret, i, len; 1364 bool match = false; 1365 1366 mutex_lock(&vhost_scsi_mutex); 1367 mutex_lock(&vs->dev.mutex); 1368 1369 /* Verify that ring has been setup correctly. */ 1370 for (index = 0; index < vs->dev.nvqs; ++index) { 1371 /* Verify that ring has been setup correctly. */ 1372 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { 1373 ret = -EFAULT; 1374 goto out; 1375 } 1376 } 1377 1378 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; 1379 vs_tpg = kzalloc(len, GFP_KERNEL); 1380 if (!vs_tpg) { 1381 ret = -ENOMEM; 1382 goto out; 1383 } 1384 if (vs->vs_tpg) 1385 memcpy(vs_tpg, vs->vs_tpg, len); 1386 1387 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { 1388 mutex_lock(&tpg->tv_tpg_mutex); 1389 if (!tpg->tpg_nexus) { 1390 mutex_unlock(&tpg->tv_tpg_mutex); 1391 continue; 1392 } 1393 if (tpg->tv_tpg_vhost_count != 0) { 1394 mutex_unlock(&tpg->tv_tpg_mutex); 1395 continue; 1396 } 1397 tv_tport = tpg->tport; 1398 1399 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1400 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { 1401 kfree(vs_tpg); 1402 mutex_unlock(&tpg->tv_tpg_mutex); 1403 ret = -EEXIST; 1404 goto out; 1405 } 1406 /* 1407 * In order to ensure individual vhost-scsi configfs 1408 * groups cannot be removed while in use by vhost ioctl, 1409 * go ahead and take an explicit se_tpg->tpg_group.cg_item 1410 * dependency now. 1411 */ 1412 se_tpg = &tpg->se_tpg; 1413 ret = target_depend_item(&se_tpg->tpg_group.cg_item); 1414 if (ret) { 1415 pr_warn("configfs_depend_item() failed: %d\n", ret); 1416 kfree(vs_tpg); 1417 mutex_unlock(&tpg->tv_tpg_mutex); 1418 goto out; 1419 } 1420 tpg->tv_tpg_vhost_count++; 1421 tpg->vhost_scsi = vs; 1422 vs_tpg[tpg->tport_tpgt] = tpg; 1423 smp_mb__after_atomic(); 1424 match = true; 1425 } 1426 mutex_unlock(&tpg->tv_tpg_mutex); 1427 } 1428 1429 if (match) { 1430 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, 1431 sizeof(vs->vs_vhost_wwpn)); 1432 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1433 vq = &vs->vqs[i].vq; 1434 mutex_lock(&vq->mutex); 1435 vq->private_data = vs_tpg; 1436 vhost_init_used(vq); 1437 mutex_unlock(&vq->mutex); 1438 } 1439 ret = 0; 1440 } else { 1441 ret = -EEXIST; 1442 } 1443 1444 /* 1445 * Act as synchronize_rcu to make sure access to 1446 * old vs->vs_tpg is finished. 
1447 */ 1448 vhost_scsi_flush(vs); 1449 kfree(vs->vs_tpg); 1450 vs->vs_tpg = vs_tpg; 1451 1452 out: 1453 mutex_unlock(&vs->dev.mutex); 1454 mutex_unlock(&vhost_scsi_mutex); 1455 return ret; 1456 } 1457 1458 static int 1459 vhost_scsi_clear_endpoint(struct vhost_scsi *vs, 1460 struct vhost_scsi_target *t) 1461 { 1462 struct se_portal_group *se_tpg; 1463 struct vhost_scsi_tport *tv_tport; 1464 struct vhost_scsi_tpg *tpg; 1465 struct vhost_virtqueue *vq; 1466 bool match = false; 1467 int index, ret, i; 1468 u8 target; 1469 1470 mutex_lock(&vhost_scsi_mutex); 1471 mutex_lock(&vs->dev.mutex); 1472 /* Verify that ring has been setup correctly. */ 1473 for (index = 0; index < vs->dev.nvqs; ++index) { 1474 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { 1475 ret = -EFAULT; 1476 goto err_dev; 1477 } 1478 } 1479 1480 if (!vs->vs_tpg) { 1481 ret = 0; 1482 goto err_dev; 1483 } 1484 1485 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { 1486 target = i; 1487 tpg = vs->vs_tpg[target]; 1488 if (!tpg) 1489 continue; 1490 1491 mutex_lock(&tpg->tv_tpg_mutex); 1492 tv_tport = tpg->tport; 1493 if (!tv_tport) { 1494 ret = -ENODEV; 1495 goto err_tpg; 1496 } 1497 1498 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { 1499 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu" 1500 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", 1501 tv_tport->tport_name, tpg->tport_tpgt, 1502 t->vhost_wwpn, t->vhost_tpgt); 1503 ret = -EINVAL; 1504 goto err_tpg; 1505 } 1506 tpg->tv_tpg_vhost_count--; 1507 tpg->vhost_scsi = NULL; 1508 vs->vs_tpg[target] = NULL; 1509 match = true; 1510 mutex_unlock(&tpg->tv_tpg_mutex); 1511 /* 1512 * Release se_tpg->tpg_group.cg_item configfs dependency now 1513 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. 1514 */ 1515 se_tpg = &tpg->se_tpg; 1516 target_undepend_item(&se_tpg->tpg_group.cg_item); 1517 } 1518 if (match) { 1519 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1520 vq = &vs->vqs[i].vq; 1521 mutex_lock(&vq->mutex); 1522 vq->private_data = NULL; 1523 mutex_unlock(&vq->mutex); 1524 } 1525 } 1526 /* 1527 * Act as synchronize_rcu to make sure access to 1528 * old vs->vs_tpg is finished. 
1529 */ 1530 vhost_scsi_flush(vs); 1531 kfree(vs->vs_tpg); 1532 vs->vs_tpg = NULL; 1533 WARN_ON(vs->vs_events_nr); 1534 mutex_unlock(&vs->dev.mutex); 1535 mutex_unlock(&vhost_scsi_mutex); 1536 return 0; 1537 1538 err_tpg: 1539 mutex_unlock(&tpg->tv_tpg_mutex); 1540 err_dev: 1541 mutex_unlock(&vs->dev.mutex); 1542 mutex_unlock(&vhost_scsi_mutex); 1543 return ret; 1544 } 1545 1546 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) 1547 { 1548 struct vhost_virtqueue *vq; 1549 int i; 1550 1551 if (features & ~VHOST_SCSI_FEATURES) 1552 return -EOPNOTSUPP; 1553 1554 mutex_lock(&vs->dev.mutex); 1555 if ((features & (1 << VHOST_F_LOG_ALL)) && 1556 !vhost_log_access_ok(&vs->dev)) { 1557 mutex_unlock(&vs->dev.mutex); 1558 return -EFAULT; 1559 } 1560 1561 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1562 vq = &vs->vqs[i].vq; 1563 mutex_lock(&vq->mutex); 1564 vq->acked_features = features; 1565 mutex_unlock(&vq->mutex); 1566 } 1567 mutex_unlock(&vs->dev.mutex); 1568 return 0; 1569 } 1570 1571 static int vhost_scsi_open(struct inode *inode, struct file *f) 1572 { 1573 struct vhost_scsi *vs; 1574 struct vhost_virtqueue **vqs; 1575 int r = -ENOMEM, i; 1576 1577 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 1578 if (!vs) { 1579 vs = vzalloc(sizeof(*vs)); 1580 if (!vs) 1581 goto err_vs; 1582 } 1583 1584 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); 1585 if (!vqs) 1586 goto err_vqs; 1587 1588 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1589 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); 1590 1591 vs->vs_events_nr = 0; 1592 vs->vs_events_missed = false; 1593 1594 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; 1595 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1596 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; 1597 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; 1598 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) { 1599 vqs[i] = &vs->vqs[i].vq; 1600 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1601 } 1602 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1603 1604 vhost_scsi_init_inflight(vs, NULL); 1605 1606 f->private_data = vs; 1607 return 0; 1608 1609 err_vqs: 1610 kvfree(vs); 1611 err_vs: 1612 return r; 1613 } 1614 1615 static int vhost_scsi_release(struct inode *inode, struct file *f) 1616 { 1617 struct vhost_scsi *vs = f->private_data; 1618 struct vhost_scsi_target t; 1619 1620 mutex_lock(&vs->dev.mutex); 1621 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn)); 1622 mutex_unlock(&vs->dev.mutex); 1623 vhost_scsi_clear_endpoint(vs, &t); 1624 vhost_dev_stop(&vs->dev); 1625 vhost_dev_cleanup(&vs->dev, false); 1626 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. 
*/ 1627 vhost_scsi_flush(vs); 1628 kfree(vs->dev.vqs); 1629 kvfree(vs); 1630 return 0; 1631 } 1632 1633 static long 1634 vhost_scsi_ioctl(struct file *f, 1635 unsigned int ioctl, 1636 unsigned long arg) 1637 { 1638 struct vhost_scsi *vs = f->private_data; 1639 struct vhost_scsi_target backend; 1640 void __user *argp = (void __user *)arg; 1641 u64 __user *featurep = argp; 1642 u32 __user *eventsp = argp; 1643 u32 events_missed; 1644 u64 features; 1645 int r, abi_version = VHOST_SCSI_ABI_VERSION; 1646 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1647 1648 switch (ioctl) { 1649 case VHOST_SCSI_SET_ENDPOINT: 1650 if (copy_from_user(&backend, argp, sizeof backend)) 1651 return -EFAULT; 1652 if (backend.reserved != 0) 1653 return -EOPNOTSUPP; 1654 1655 return vhost_scsi_set_endpoint(vs, &backend); 1656 case VHOST_SCSI_CLEAR_ENDPOINT: 1657 if (copy_from_user(&backend, argp, sizeof backend)) 1658 return -EFAULT; 1659 if (backend.reserved != 0) 1660 return -EOPNOTSUPP; 1661 1662 return vhost_scsi_clear_endpoint(vs, &backend); 1663 case VHOST_SCSI_GET_ABI_VERSION: 1664 if (copy_to_user(argp, &abi_version, sizeof abi_version)) 1665 return -EFAULT; 1666 return 0; 1667 case VHOST_SCSI_SET_EVENTS_MISSED: 1668 if (get_user(events_missed, eventsp)) 1669 return -EFAULT; 1670 mutex_lock(&vq->mutex); 1671 vs->vs_events_missed = events_missed; 1672 mutex_unlock(&vq->mutex); 1673 return 0; 1674 case VHOST_SCSI_GET_EVENTS_MISSED: 1675 mutex_lock(&vq->mutex); 1676 events_missed = vs->vs_events_missed; 1677 mutex_unlock(&vq->mutex); 1678 if (put_user(events_missed, eventsp)) 1679 return -EFAULT; 1680 return 0; 1681 case VHOST_GET_FEATURES: 1682 features = VHOST_SCSI_FEATURES; 1683 if (copy_to_user(featurep, &features, sizeof features)) 1684 return -EFAULT; 1685 return 0; 1686 case VHOST_SET_FEATURES: 1687 if (copy_from_user(&features, featurep, sizeof features)) 1688 return -EFAULT; 1689 return vhost_scsi_set_features(vs, features); 1690 default: 1691 mutex_lock(&vs->dev.mutex); 1692 r = vhost_dev_ioctl(&vs->dev, ioctl, argp); 1693 /* TODO: flush backend after dev ioctl. 
*/ 1694 if (r == -ENOIOCTLCMD) 1695 r = vhost_vring_ioctl(&vs->dev, ioctl, argp); 1696 mutex_unlock(&vs->dev.mutex); 1697 return r; 1698 } 1699 } 1700 1701 #ifdef CONFIG_COMPAT 1702 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, 1703 unsigned long arg) 1704 { 1705 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); 1706 } 1707 #endif 1708 1709 static const struct file_operations vhost_scsi_fops = { 1710 .owner = THIS_MODULE, 1711 .release = vhost_scsi_release, 1712 .unlocked_ioctl = vhost_scsi_ioctl, 1713 #ifdef CONFIG_COMPAT 1714 .compat_ioctl = vhost_scsi_compat_ioctl, 1715 #endif 1716 .open = vhost_scsi_open, 1717 .llseek = noop_llseek, 1718 }; 1719 1720 static struct miscdevice vhost_scsi_misc = { 1721 MISC_DYNAMIC_MINOR, 1722 "vhost-scsi", 1723 &vhost_scsi_fops, 1724 }; 1725 1726 static int __init vhost_scsi_register(void) 1727 { 1728 return misc_register(&vhost_scsi_misc); 1729 } 1730 1731 static int vhost_scsi_deregister(void) 1732 { 1733 return misc_deregister(&vhost_scsi_misc); 1734 } 1735 1736 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) 1737 { 1738 switch (tport->tport_proto_id) { 1739 case SCSI_PROTOCOL_SAS: 1740 return "SAS"; 1741 case SCSI_PROTOCOL_FCP: 1742 return "FCP"; 1743 case SCSI_PROTOCOL_ISCSI: 1744 return "iSCSI"; 1745 default: 1746 break; 1747 } 1748 1749 return "Unknown"; 1750 } 1751 1752 static void 1753 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, 1754 struct se_lun *lun, bool plug) 1755 { 1756 1757 struct vhost_scsi *vs = tpg->vhost_scsi; 1758 struct vhost_virtqueue *vq; 1759 u32 reason; 1760 1761 if (!vs) 1762 return; 1763 1764 mutex_lock(&vs->dev.mutex); 1765 1766 if (plug) 1767 reason = VIRTIO_SCSI_EVT_RESET_RESCAN; 1768 else 1769 reason = VIRTIO_SCSI_EVT_RESET_REMOVED; 1770 1771 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; 1772 mutex_lock(&vq->mutex); 1773 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) 1774 vhost_scsi_send_evt(vs, tpg, lun, 1775 VIRTIO_SCSI_T_TRANSPORT_RESET, reason); 1776 mutex_unlock(&vq->mutex); 1777 mutex_unlock(&vs->dev.mutex); 1778 } 1779 1780 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) 1781 { 1782 vhost_scsi_do_plug(tpg, lun, true); 1783 } 1784 1785 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) 1786 { 1787 vhost_scsi_do_plug(tpg, lun, false); 1788 } 1789 1790 static int vhost_scsi_port_link(struct se_portal_group *se_tpg, 1791 struct se_lun *lun) 1792 { 1793 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1794 struct vhost_scsi_tpg, se_tpg); 1795 1796 mutex_lock(&vhost_scsi_mutex); 1797 1798 mutex_lock(&tpg->tv_tpg_mutex); 1799 tpg->tv_tpg_port_count++; 1800 mutex_unlock(&tpg->tv_tpg_mutex); 1801 1802 vhost_scsi_hotplug(tpg, lun); 1803 1804 mutex_unlock(&vhost_scsi_mutex); 1805 1806 return 0; 1807 } 1808 1809 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, 1810 struct se_lun *lun) 1811 { 1812 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1813 struct vhost_scsi_tpg, se_tpg); 1814 1815 mutex_lock(&vhost_scsi_mutex); 1816 1817 mutex_lock(&tpg->tv_tpg_mutex); 1818 tpg->tv_tpg_port_count--; 1819 mutex_unlock(&tpg->tv_tpg_mutex); 1820 1821 vhost_scsi_hotunplug(tpg, lun); 1822 1823 mutex_unlock(&vhost_scsi_mutex); 1824 } 1825 1826 static struct se_node_acl * 1827 vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, 1828 struct config_group *group, 1829 const char *name) 1830 { 1831 struct se_node_acl *se_nacl, *se_nacl_new; 1832 struct vhost_scsi_nacl *nacl; 1833 u64 wwpn = 0; 1834 
u32 nexus_depth; 1835 1836 /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) 1837 return ERR_PTR(-EINVAL); */ 1838 se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); 1839 if (!se_nacl_new) 1840 return ERR_PTR(-ENOMEM); 1841 1842 nexus_depth = 1; 1843 /* 1844 * se_nacl_new may be released by core_tpg_add_initiator_node_acl() 1845 * when converting a NodeACL from demo mode -> explict 1846 */ 1847 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 1848 name, nexus_depth); 1849 if (IS_ERR(se_nacl)) { 1850 vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); 1851 return se_nacl; 1852 } 1853 /* 1854 * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN 1855 */ 1856 nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); 1857 nacl->iport_wwpn = wwpn; 1858 1859 return se_nacl; 1860 } 1861 1862 static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl) 1863 { 1864 struct vhost_scsi_nacl *nacl = container_of(se_acl, 1865 struct vhost_scsi_nacl, se_node_acl); 1866 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); 1867 kfree(nacl); 1868 } 1869 1870 static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, 1871 struct se_session *se_sess) 1872 { 1873 struct vhost_scsi_cmd *tv_cmd; 1874 unsigned int i; 1875 1876 if (!se_sess->sess_cmd_map) 1877 return; 1878 1879 for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { 1880 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; 1881 1882 kfree(tv_cmd->tvc_sgl); 1883 kfree(tv_cmd->tvc_prot_sgl); 1884 kfree(tv_cmd->tvc_upages); 1885 } 1886 } 1887 1888 static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type( 1889 struct se_portal_group *se_tpg, 1890 const char *page, 1891 size_t count) 1892 { 1893 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1894 struct vhost_scsi_tpg, se_tpg); 1895 unsigned long val; 1896 int ret = kstrtoul(page, 0, &val); 1897 1898 if (ret) { 1899 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); 1900 return ret; 1901 } 1902 if (val != 0 && val != 1 && val != 3) { 1903 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val); 1904 return -EINVAL; 1905 } 1906 tpg->tv_fabric_prot_type = val; 1907 1908 return count; 1909 } 1910 1911 static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type( 1912 struct se_portal_group *se_tpg, 1913 char *page) 1914 { 1915 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 1916 struct vhost_scsi_tpg, se_tpg); 1917 1918 return sprintf(page, "%d\n", tpg->tv_fabric_prot_type); 1919 } 1920 TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR); 1921 1922 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = { 1923 &vhost_scsi_tpg_attrib_fabric_prot_type.attr, 1924 NULL, 1925 }; 1926 1927 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, 1928 const char *name) 1929 { 1930 struct se_portal_group *se_tpg; 1931 struct se_session *se_sess; 1932 struct vhost_scsi_nexus *tv_nexus; 1933 struct vhost_scsi_cmd *tv_cmd; 1934 unsigned int i; 1935 1936 mutex_lock(&tpg->tv_tpg_mutex); 1937 if (tpg->tpg_nexus) { 1938 mutex_unlock(&tpg->tv_tpg_mutex); 1939 pr_debug("tpg->tpg_nexus already exists\n"); 1940 return -EEXIST; 1941 } 1942 se_tpg = &tpg->se_tpg; 1943 1944 tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); 1945 if (!tv_nexus) { 1946 mutex_unlock(&tpg->tv_tpg_mutex); 1947 pr_err("Unable to allocate struct vhost_scsi_nexus\n"); 1948 return -ENOMEM; 1949 } 1950 /* 1951 * Initialize the struct se_session pointer and setup tagpool 1952 * for struct vhost_scsi_cmd descriptors 1953 */ 1954 
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer and setup tagpool
	 * for struct vhost_scsi_cmd descriptors
	 */
	tv_nexus->tvn_se_sess = transport_init_session_tags(
					VHOST_SCSI_DEFAULT_TAGS,
					sizeof(struct vhost_scsi_cmd),
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	se_sess = tv_nexus->tvn_se_sess;
	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
					VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
					se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		goto out;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active.
	 */
	transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;

out:
	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
	transport_free_session(se_sess);
	kfree(tv_nexus);
	return -ENOMEM;
}

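/*
 * Per-command resources are preallocated above: VHOST_SCSI_DEFAULT_TAGS
 * descriptors, each with VHOST_SCSI_PREALLOC_SGLS data and
 * VHOST_SCSI_PREALLOC_PROT_SGLS protection scatterlist entries plus
 * VHOST_SCSI_PREALLOC_UPAGES page pointers, so the I/O submission path
 * does not need to allocate memory.  vhost_scsi_drop_nexus() below undoes
 * this, but only once no LUN mappings or vhost instances still reference
 * the TPG.
 */
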
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
					 char *page)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

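/*
 * The per-TPG "nexus" attribute drives the helpers above and the store
 * handler below: writing an initiator WWN whose prefix (naa., fc. or iqn.)
 * matches the target port's protocol creates the I_T nexus, and writing
 * "NULL" tears it down again.  For example (WWNs are illustrative):
 *
 *	echo -n naa.60014055cf3a18e0 > \
 *		/sys/kernel/config/target/vhost/naa.5001405000000001/tpgt_1/nexus
 *	echo NULL > /sys/kernel/config/target/vhost/naa.5001405000000001/tpgt_1/nexus
 */
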
static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
					  const char *page,
					  size_t count)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_nexus.attr,
	NULL,
};

static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn,
		    struct config_group *group,
		    const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);
	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&vhost_scsi_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}

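/*
 * TPGs are created by mkdir(2) on a "tpgt_<n>" directory beneath the target
 * WWN, with <n> parsed above as a base-10 value below VHOST_SCSI_MAX_TARGET.
 * For example (WWPN is illustrative):
 *
 *	mkdir /sys/kernel/config/target/vhost/naa.5001405000000001/tpgt_1
 */
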
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
				 char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(vhost_scsi, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_version.attr,
	NULL,
};

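/*
 * Fabric ops table wiring vhost-scsi into the generic target core: the
 * tpg_/sess_/queue_ callbacks implement the control and data paths, while
 * the fabric_make_/fabric_drop_ callbacks back the configfs directories
 * used in the examples above.  The read-only "version" attribute defined
 * above is expected to show up at the fabric root, e.g.
 * /sys/kernel/config/target/vhost/version (path illustrative).
 */
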
static struct target_core_fabric_ops vhost_scsi_ops = {
	.module = THIS_MODULE,
	.name = "vhost",
	.get_fabric_name = vhost_scsi_get_fabric_name,
	.get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
	.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
	.tpg_get_tag = vhost_scsi_get_tpgt,
	.tpg_get_default_depth = vhost_scsi_get_default_depth,
	.tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id,
	.tpg_check_demo_mode = vhost_scsi_check_true,
	.tpg_check_demo_mode_cache = vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
	.tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
	.tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
	.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
	.release_cmd = vhost_scsi_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.shutdown_session = vhost_scsi_shutdown_session,
	.close_session = vhost_scsi_close_session,
	.sess_get_index = vhost_scsi_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = vhost_scsi_write_pending,
	.write_pending_status = vhost_scsi_write_pending_status,
	.set_default_node_attributes = vhost_scsi_set_default_node_attrs,
	.get_task_tag = vhost_scsi_get_task_tag,
	.get_cmd_state = vhost_scsi_get_cmd_state,
	.queue_data_in = vhost_scsi_queue_data_in,
	.queue_status = vhost_scsi_queue_status,
	.queue_tm_rsp = vhost_scsi_queue_tm_rsp,
	.aborted_task = vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = vhost_scsi_make_tport,
	.fabric_drop_wwn = vhost_scsi_drop_tport,
	.fabric_make_tpg = vhost_scsi_make_tpg,
	.fabric_drop_tpg = vhost_scsi_drop_tpg,
	.fabric_post_link = vhost_scsi_port_link,
	.fabric_pre_unlink = vhost_scsi_port_unlink,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = vhost_scsi_make_nodeacl,
	.fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,

	.tfc_wwn_attrs = vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
};

static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
	destroy_workqueue(vhost_scsi_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);
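
/*
 * Once a WWN, TPG, nexus and LUN have been configured through configfs as
 * in the examples above, a userspace VMM (QEMU is the usual consumer) binds
 * a virtio-scsi device to the target by opening the misc device registered
 * by vhost_scsi_register() and issuing the vhost ioctls.  A minimal sketch,
 * with error handling, memory table and virtqueue setup omitted and an
 * illustrative WWPN:
 *
 *	struct vhost_scsi_target t = { .vhost_tpgt = 1 };
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	strncpy(t.vhost_wwpn, "naa.5001405000000001", sizeof(t.vhost_wwpn) - 1);
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */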