/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define TCM_VHOST_VERSION	"v0.1"
#define TCM_VHOST_NAMELEN	256
#define TCM_VHOST_MAX_CDB_SIZE	32

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
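/*
 * Each vhost_scsi_virtqueue keeps two of these and flips between them on
 * flush: commands take a kref on the current generation via
 * tcm_vhost_get_inflight(), while vhost_scsi_flush() waits for the old
 * generation's refcount to drain.  See tcm_vhost_init_inflight().
 */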
struct tcm_vhost_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	/* Pointer to response */
	struct virtio_scsi_cmd_resp __user *tvc_resp;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct tcm_vhost_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for SAS Initiator port */
	char iport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* list for tcm_vhost_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct tcm_vhost_nexus *tpg_nexus;
	/* Pointer back to tcm_vhost_tport */
	struct tcm_vhost_tport *tport;
	/* Returned by tcm_vhost_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_tport() */
	struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128
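/*
 * VHOST_SCSI_MAX_TARGET matches the 8-bit target field of the virtio-scsi
 * LUN address, so the vs_tpg[] array can be indexed directly by v_req.lun[1].
 * VHOST_SCSI_MAX_VQ bounds the virtqueue array below; queues 0 and 1 are
 * reserved for control and event handling, the remainder carry I/O requests.
 */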
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
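/*
 * Worked example (assuming 4 KiB pages): iov_base = 0x10ff0, iov_len = 0x30
 * gives PAGE_ALIGN(0x11020) - (0x10ff0 & PAGE_MASK) = 0x12000 - 0x10000,
 * i.e. two pages, even though only 48 bytes are covered, because the range
 * straddles a page boundary.
 */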
static void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* set up new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl,
			      struct t10_pr_registration *pr_reg,
			      int *format_code,
			      unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
				  struct se_node_acl *se_nacl,
				  struct t10_pr_registration *pr_reg,
				  int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
				    const char *buf,
				    u32 *out_tid_len,
				    char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}
static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
			     struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);

	if (tv_cmd->tvc_sgl_count) {
		u32 i;
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));

		kfree(tv_cmd->tvc_sgl);
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);
	kfree(tv_cmd);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
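/*
 * TCM completes commands from target context via the queue_data_in() and
 * queue_status() callbacks below; both only push the command onto the
 * lockless vs_completion_list and kick vs_completion_work, so the actual
 * response copy-out happens in vhost_scsi_complete_cmd_work() on the vhost
 * worker thread, which owns the guest's vring mappings.
 */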
static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				  tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *
vhost_scsi_allocate_cmd(struct vhost_virtqueue *vq,
			struct tcm_vhost_tpg *tpg,
			struct virtio_scsi_cmd_req *v_req,
			u32 exp_data_len,
			int data_direction)
{
	struct tcm_vhost_cmd *cmd;
	struct tcm_vhost_nexus *tv_nexus;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}

	cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
	if (!cmd) {
		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}
	cmd->tvc_tag = v_req->tag;
	cmd->tvc_task_attr = v_req->task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = tcm_vhost_get_inflight(vq);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct scatterlist *sgl,
		      unsigned int sgl_count,
		      struct iovec *iov,
		      int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than requested */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	kfree(pages);
	return ret;
}
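/*
 * On success the references taken by get_user_pages_fast() above are handed
 * over to the scatterlist: they are dropped with put_page() either in
 * tcm_vhost_release_cmd() once TCM has completed the command, or in
 * vhost_scsi_map_iov_to_sgl() below when a later iovec fails to map.
 */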
static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
			  struct iovec *iov,
			  unsigned int niov,
			  int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
	sg_init_table(sg, sgl_count);

	cmd->tvc_sgl = sg;
	cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
		if (ret < 0) {
			unsigned int mapped = sg - cmd->tvc_sgl;
			unsigned int j;

			/* Release only the entries that were actually mapped */
			for (j = 0; j < mapped; j++)
				put_page(sg_page(&cmd->tvc_sgl[j]));
			kfree(cmd->tvc_sgl);
			cmd->tvc_sgl = NULL;
			cmd->tvc_sgl_count = 0;
			return ret;
		}

		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			cmd->tvc_task_attr, cmd->tvc_data_direction,
			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
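/*
 * virtio-scsi request layout: the guest posts the virtio_scsi_cmd_req plus
 * any data-out payload as readable descriptors, followed by the
 * virtio_scsi_cmd_resp plus any data-in buffers as writable descriptors.
 * vhost_scsi_handle_vq() therefore classifies the transfer direction purely
 * from the out/in descriptor counts returned by vhost_get_vq_desc().
 */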
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_cmd *cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 *
	 * TODO: Check that we are running from vhost_worker which acts
	 * as read-side critical section for vhost kind of RCU.
	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
	 */
	vs_tpg = rcu_dereference_check(vq->private_data, 1);
	if (!vs_tpg)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

		/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}
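		/*
		 * virtio-scsi 8-byte LUN addressing: lun[0] is 0x01, lun[1]
		 * is the target number, and lun[2..3] carry the SAM LUN,
		 * typically flat-space encoded as 0x4000 | lun, hence the
		 * 0x3FFF mask when tvc_lun is extracted below.  E.g. target
		 * 5, LUN 2 arrives as { 0x01, 0x05, 0x40, 0x02, 0, ... }.
		 */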
		/* Extract the tpgt */
		target = v_req.lun[1];
		tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		cmd = vhost_scsi_allocate_cmd(vq, tpg, &v_req,
					exp_data_len, data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
					PTR_ERR(cmd));
			goto err_cmd;
		}
		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", cmd, exp_data_len, data_direction);

		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy in the received CDB descriptor into cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
		cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			cmd->tvc_cdb[0], cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_TO_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}
static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
		   struct tcm_vhost_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}
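/*
 * Typical userspace bring-up (illustrative sketch; the WWPN is an example
 * value): open /dev/vhost-scsi, issue VHOST_SET_OWNER and the
 * VHOST_SET_VRING_* ioctls for each queue, then bind the configfs target:
 *
 *	struct vhost_scsi_target t = { 0 };
 *
 *	strncpy(t.vhost_wwpn, "naa.600140554cf3a18e",
 *		sizeof(t.vhost_wwpn) - 1);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */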
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 * The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, vs_tpg);
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, NULL);
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return -ENOMEM;

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(vs);
		return -ENOMEM;
	}

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(vs, NULL);

	if (r < 0) {
		kfree(vqs);
		kfree(vs);
		return r;
	}

	f->private_data = vs;
	return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kfree(vs);
	return 0;
}
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
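/*
 * LUN hotplug/hotunplug: when the guest has negotiated
 * VIRTIO_SCSI_F_HOTPLUG, a VIRTIO_SCSI_T_TRANSPORT_RESET event with a
 * RESCAN or REMOVED reason is queued so the guest rescans the target;
 * without the feature bit the notification is silently skipped.
 */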
static void
tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
		  struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

static struct se_node_acl *
tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
		       struct config_group *group,
		       const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
				const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tv_nexus->tvn_se_sess = transport_init_session();
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		transport_free_session(tv_nexus->tvn_se_sess);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
					char *page)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
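/*
 * The nexus attribute is driven from configfs.  Illustrative usage (the
 * WWPNs are example values):
 *
 *   echo naa.60014055d1f05b60 > \
 *     /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * creates the I_T nexus via tcm_vhost_make_nexus(), and writing "NULL"
 * tears it down again via tcm_vhost_drop_nexus().
 */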
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
					 const char *page,
					 size_t count)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};

static struct se_portal_group *
tcm_vhost_make_tpg(struct se_wwn *wwn,
		   struct config_group *group,
		   const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);

	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
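/*
 * Target WWN directories are also created from configfs.  Illustrative
 * usage (example WWPN):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e
 *
 * invokes tcm_vhost_make_tport() with name "naa.600140554cf3a18e", and the
 * "naa."/"fc."/"iqn." prefix selects the emulated SCSI protocol below.
 */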
static struct se_wwn *
tcm_vhost_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
				char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};
static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name		= tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
	.tpg_get_tag			= tcm_vhost_get_tag,
	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_vhost_check_true,
	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
	.release_cmd			= tcm_vhost_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.shutdown_session		= tcm_vhost_shutdown_session,
	.close_session			= tcm_vhost_close_session,
	.sess_get_index			= tcm_vhost_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_vhost_write_pending,
	.write_pending_status		= tcm_vhost_write_pending_status,
	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
	.get_task_tag			= tcm_vhost_get_task_tag,
	.get_cmd_state			= tcm_vhost_get_cmd_state,
	.queue_data_in			= tcm_vhost_queue_data_in,
	.queue_status			= tcm_vhost_queue_status,
	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_vhost_make_tport,
	.fabric_drop_wwn		= tcm_vhost_drop_tport,
	.fabric_make_tpg		= tcm_vhost_make_tpg,
	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
	.fabric_post_link		= tcm_vhost_port_link,
	.fabric_pre_unlink		= tcm_vhost_port_unlink,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
};

static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		/* Don't leak the fabric context allocated above */
		target_fabric_configfs_free(fabric);
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}
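/*
 * Module bring-up in tcm_vhost_init() below proceeds in three steps:
 * create the dedicated dispatch workqueue, register the /dev/vhost-scsi
 * misc device via vhost_scsi_register(), then register the configfs
 * fabric; the error paths and tcm_vhost_exit() unwind in reverse order.
 */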
static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void __exit tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);
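/*
 * Illustrative only: with the fabric configured through configfs as in
 * the examples above, a VMM attaches a guest by opening the
 * /dev/vhost-scsi character device registered by this module.  The QEMU
 * invocation below is an assumption about the userspace side, not part
 * of this driver:
 *
 *   qemu-system-x86_64 ... \
 *	-device vhost-scsi-pci,wwpn=naa.600140554cf3a18e
 */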