1 /* 2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org> 3 * Copyright (C) 2014 Red Hat, Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 */ 18 19 #include <linux/spinlock.h> 20 #include <linux/module.h> 21 #include <linux/idr.h> 22 #include <linux/kernel.h> 23 #include <linux/timer.h> 24 #include <linux/parser.h> 25 #include <linux/uio_driver.h> 26 #include <net/genetlink.h> 27 #include <scsi/scsi_common.h> 28 #include <scsi/scsi_proto.h> 29 #include <target/target_core_base.h> 30 #include <target/target_core_fabric.h> 31 #include <target/target_core_backend.h> 32 #include <target/target_core_backend_configfs.h> 33 34 #include <linux/target_core_user.h> 35 36 /* 37 * Define a shared-memory interface for LIO to pass SCSI commands and 38 * data to userspace for processing. This is to allow backends that 39 * are too complex for in-kernel support to be possible. 40 * 41 * It uses the UIO framework to do a lot of the device-creation and 42 * introspection work for us. 43 * 44 * See the .h file for how the ring is laid out. Note that while the 45 * command ring is defined, the particulars of the data area are 46 * not. Offset values in the command entry point to other locations 47 * internal to the mmap()ed area. There is separate space outside the 48 * command ring for data buffers. This leaves maximum flexibility for 49 * moving buffer allocations, or even page flipping or other 50 * allocation techniques, without altering the command ring layout. 51 * 52 * SECURITY: 53 * The user process must be assumed to be malicious. There's no way to 54 * prevent it breaking the command ring protocol if it wants, but in 55 * order to prevent other issues we must only ever read *data* from 56 * the shared memory area, not offsets or sizes. This applies to 57 * command ring entries as well as the mailbox. Extra code needed for 58 * this may have a 'UAM' comment. 59 */ 60 61 62 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 63 64 #define CMDR_SIZE (16 * 4096) 65 #define DATA_SIZE (257 * 4096) 66 67 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 68 69 static struct device *tcmu_root_device; 70 71 struct tcmu_hba { 72 u32 host_id; 73 }; 74 75 #define TCMU_CONFIG_LEN 256 76 77 struct tcmu_dev { 78 struct se_device se_dev; 79 80 char *name; 81 struct se_hba *hba; 82 83 #define TCMU_DEV_BIT_OPEN 0 84 #define TCMU_DEV_BIT_BROKEN 1 85 unsigned long flags; 86 87 struct uio_info uio_info; 88 89 struct tcmu_mailbox *mb_addr; 90 size_t dev_size; 91 u32 cmdr_size; 92 u32 cmdr_last_cleaned; 93 /* Offset of data ring from start of mb */ 94 size_t data_off; 95 size_t data_size; 96 /* Ring head + tail values. */ 97 /* Must add data_off and mb_addr to get the address */ 98 size_t data_head; 99 size_t data_tail; 100 101 wait_queue_head_t wait_cmdr; 102 /* TODO should this be a mutex? */ 103 spinlock_t cmdr_lock; 104 105 struct idr commands; 106 spinlock_t commands_lock; 107 108 struct timer_list timeout; 109 110 char dev_config[TCMU_CONFIG_LEN]; 111 }; 112 113 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 114 115 #define CMDR_OFF sizeof(struct tcmu_mailbox) 116 117 struct tcmu_cmd { 118 struct se_cmd *se_cmd; 119 struct tcmu_dev *tcmu_dev; 120 121 uint16_t cmd_id; 122 123 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if 124 cmd has been completed then accessing se_cmd is off limits */ 125 size_t data_length; 126 127 unsigned long deadline; 128 129 #define TCMU_CMD_BIT_EXPIRED 0 130 unsigned long flags; 131 }; 132 133 static struct kmem_cache *tcmu_cmd_cache; 134 135 /* multicast group */ 136 enum tcmu_multicast_groups { 137 TCMU_MCGRP_CONFIG, 138 }; 139 140 static const struct genl_multicast_group tcmu_mcgrps[] = { 141 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 142 }; 143 144 /* Our generic netlink family */ 145 static struct genl_family tcmu_genl_family = { 146 .id = GENL_ID_GENERATE, 147 .hdrsize = 0, 148 .name = "TCM-USER", 149 .version = 1, 150 .maxattr = TCMU_ATTR_MAX, 151 .mcgrps = tcmu_mcgrps, 152 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 153 }; 154 155 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 156 { 157 struct se_device *se_dev = se_cmd->se_dev; 158 struct tcmu_dev *udev = TCMU_DEV(se_dev); 159 struct tcmu_cmd *tcmu_cmd; 160 int cmd_id; 161 162 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); 163 if (!tcmu_cmd) 164 return NULL; 165 166 tcmu_cmd->se_cmd = se_cmd; 167 tcmu_cmd->tcmu_dev = udev; 168 tcmu_cmd->data_length = se_cmd->data_length; 169 170 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); 171 172 idr_preload(GFP_KERNEL); 173 spin_lock_irq(&udev->commands_lock); 174 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0, 175 USHRT_MAX, GFP_NOWAIT); 176 spin_unlock_irq(&udev->commands_lock); 177 idr_preload_end(); 178 179 if (cmd_id < 0) { 180 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 181 return NULL; 182 } 183 tcmu_cmd->cmd_id = cmd_id; 184 185 return tcmu_cmd; 186 } 187 188 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 189 { 190 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK; 191 192 size = round_up(size+offset, PAGE_SIZE); 193 vaddr -= offset; 194 195 while (size) { 196 flush_dcache_page(virt_to_page(vaddr)); 197 size -= PAGE_SIZE; 198 } 199 } 200 201 /* 202 * Some ring helper functions. We don't assume size is a power of 2 so 203 * we can't use circ_buf.h. 204 */ 205 static inline size_t spc_used(size_t head, size_t tail, size_t size) 206 { 207 int diff = head - tail; 208 209 if (diff >= 0) 210 return diff; 211 else 212 return size + diff; 213 } 214 215 static inline size_t spc_free(size_t head, size_t tail, size_t size) 216 { 217 /* Keep 1 byte unused or we can't tell full from empty */ 218 return (size - spc_used(head, tail, size) - 1); 219 } 220 221 static inline size_t head_to_end(size_t head, size_t size) 222 { 223 return size - head; 224 } 225 226 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 227 228 /* 229 * We can't queue a command until we have space available on the cmd ring *and* space 230 * space avail on the data ring. 231 * 232 * Called with ring lock held. 233 */ 234 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed) 235 { 236 struct tcmu_mailbox *mb = udev->mb_addr; 237 size_t space; 238 u32 cmd_head; 239 size_t cmd_needed; 240 241 tcmu_flush_dcache_range(mb, sizeof(*mb)); 242 243 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 244 245 /* 246 * If cmd end-of-ring space is too small then we need space for a NOP plus 247 * original cmd - cmds are internally contiguous. 248 */ 249 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 250 cmd_needed = cmd_size; 251 else 252 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 253 254 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 255 if (space < cmd_needed) { 256 pr_debug("no cmd space: %u %u %u\n", cmd_head, 257 udev->cmdr_last_cleaned, udev->cmdr_size); 258 return false; 259 } 260 261 space = spc_free(udev->data_head, udev->data_tail, udev->data_size); 262 if (space < data_needed) { 263 pr_debug("no data space: %zu %zu %zu\n", udev->data_head, 264 udev->data_tail, udev->data_size); 265 return false; 266 } 267 268 return true; 269 } 270 271 static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 272 { 273 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 274 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 275 size_t base_command_size, command_size; 276 struct tcmu_mailbox *mb; 277 struct tcmu_cmd_entry *entry; 278 int i; 279 struct scatterlist *sg; 280 struct iovec *iov; 281 int iov_cnt = 0; 282 uint32_t cmd_head; 283 uint64_t cdb_off; 284 285 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 286 return -EINVAL; 287 288 /* 289 * Must be a certain minimum size for response sense info, but 290 * also may be larger if the iov array is large. 291 * 292 * iovs = sgl_nents+1, for end-of-ring case, plus another 1 293 * b/c size == offsetof one-past-element. 294 */ 295 base_command_size = max(offsetof(struct tcmu_cmd_entry, 296 req.iov[se_cmd->t_data_nents + 2]), 297 sizeof(struct tcmu_cmd_entry)); 298 command_size = base_command_size 299 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 300 301 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 302 303 spin_lock_irq(&udev->cmdr_lock); 304 305 mb = udev->mb_addr; 306 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 307 if ((command_size > (udev->cmdr_size / 2)) 308 || tcmu_cmd->data_length > (udev->data_size - 1)) 309 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " 310 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length, 311 udev->cmdr_size, udev->data_size); 312 313 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) { 314 int ret; 315 DEFINE_WAIT(__wait); 316 317 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 318 319 pr_debug("sleeping for ring space\n"); 320 spin_unlock_irq(&udev->cmdr_lock); 321 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 322 finish_wait(&udev->wait_cmdr, &__wait); 323 if (!ret) { 324 pr_warn("tcmu: command timed out\n"); 325 return -ETIMEDOUT; 326 } 327 328 spin_lock_irq(&udev->cmdr_lock); 329 330 /* We dropped cmdr_lock, cmd_head is stale */ 331 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 332 } 333 334 /* Insert a PAD if end-of-ring space is too small */ 335 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 336 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 337 338 entry = (void *) mb + CMDR_OFF + cmd_head; 339 tcmu_flush_dcache_range(entry, sizeof(*entry)); 340 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 341 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 342 entry->hdr.cmd_id = 0; /* not used for PAD */ 343 entry->hdr.kflags = 0; 344 entry->hdr.uflags = 0; 345 346 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 347 348 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 349 WARN_ON(cmd_head != 0); 350 } 351 352 entry = (void *) mb + CMDR_OFF + cmd_head; 353 tcmu_flush_dcache_range(entry, sizeof(*entry)); 354 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 355 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 356 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 357 entry->hdr.kflags = 0; 358 entry->hdr.uflags = 0; 359 360 /* 361 * Fix up iovecs, and handle if allocation in data ring wrapped. 362 */ 363 iov = &entry->req.iov[0]; 364 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { 365 size_t copy_bytes = min((size_t)sg->length, 366 head_to_end(udev->data_head, udev->data_size)); 367 void *from = kmap_atomic(sg_page(sg)) + sg->offset; 368 void *to = (void *) mb + udev->data_off + udev->data_head; 369 370 if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) { 371 memcpy(to, from, copy_bytes); 372 tcmu_flush_dcache_range(to, copy_bytes); 373 } 374 375 /* Even iov_base is relative to mb_addr */ 376 iov->iov_len = copy_bytes; 377 iov->iov_base = (void __user *) udev->data_off + 378 udev->data_head; 379 iov_cnt++; 380 iov++; 381 382 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); 383 384 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ 385 if (sg->length != copy_bytes) { 386 from += copy_bytes; 387 copy_bytes = sg->length - copy_bytes; 388 389 iov->iov_len = copy_bytes; 390 iov->iov_base = (void __user *) udev->data_off + 391 udev->data_head; 392 393 if (se_cmd->data_direction == DMA_TO_DEVICE) { 394 to = (void *) mb + udev->data_off + udev->data_head; 395 memcpy(to, from, copy_bytes); 396 tcmu_flush_dcache_range(to, copy_bytes); 397 } 398 399 iov_cnt++; 400 iov++; 401 402 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); 403 } 404 405 kunmap_atomic(from); 406 } 407 entry->req.iov_cnt = iov_cnt; 408 entry->req.iov_bidi_cnt = 0; 409 entry->req.iov_dif_cnt = 0; 410 411 /* All offsets relative to mb_addr, not start of entry! */ 412 cdb_off = CMDR_OFF + cmd_head + base_command_size; 413 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 414 entry->req.cdb_off = cdb_off; 415 tcmu_flush_dcache_range(entry, sizeof(*entry)); 416 417 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 418 tcmu_flush_dcache_range(mb, sizeof(*mb)); 419 420 spin_unlock_irq(&udev->cmdr_lock); 421 422 /* TODO: only if FLUSH and FUA? */ 423 uio_event_notify(&udev->uio_info); 424 425 mod_timer(&udev->timeout, 426 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); 427 428 return 0; 429 } 430 431 static int tcmu_queue_cmd(struct se_cmd *se_cmd) 432 { 433 struct se_device *se_dev = se_cmd->se_dev; 434 struct tcmu_dev *udev = TCMU_DEV(se_dev); 435 struct tcmu_cmd *tcmu_cmd; 436 int ret; 437 438 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 439 if (!tcmu_cmd) 440 return -ENOMEM; 441 442 ret = tcmu_queue_cmd_ring(tcmu_cmd); 443 if (ret < 0) { 444 pr_err("TCMU: Could not queue command\n"); 445 spin_lock_irq(&udev->commands_lock); 446 idr_remove(&udev->commands, tcmu_cmd->cmd_id); 447 spin_unlock_irq(&udev->commands_lock); 448 449 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 450 } 451 452 return ret; 453 } 454 455 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 456 { 457 struct se_cmd *se_cmd = cmd->se_cmd; 458 struct tcmu_dev *udev = cmd->tcmu_dev; 459 460 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 461 /* cmd has been completed already from timeout, just reclaim data 462 ring space */ 463 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 464 return; 465 } 466 467 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 468 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 469 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 470 cmd->se_cmd); 471 transport_generic_request_failure(cmd->se_cmd, 472 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 473 cmd->se_cmd = NULL; 474 kmem_cache_free(tcmu_cmd_cache, cmd); 475 return; 476 } 477 478 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 479 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 480 se_cmd->scsi_sense_length); 481 482 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 483 } 484 else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 485 struct scatterlist *sg; 486 int i; 487 488 /* It'd be easier to look at entry's iovec again, but UAM */ 489 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { 490 size_t copy_bytes; 491 void *to; 492 void *from; 493 494 copy_bytes = min((size_t)sg->length, 495 head_to_end(udev->data_tail, udev->data_size)); 496 497 to = kmap_atomic(sg_page(sg)) + sg->offset; 498 WARN_ON(sg->length + sg->offset > PAGE_SIZE); 499 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail; 500 tcmu_flush_dcache_range(from, copy_bytes); 501 memcpy(to, from, copy_bytes); 502 503 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); 504 505 /* Uh oh, wrapped the data buffer for this sg's data */ 506 if (sg->length != copy_bytes) { 507 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail; 508 WARN_ON(udev->data_tail); 509 to += copy_bytes; 510 copy_bytes = sg->length - copy_bytes; 511 tcmu_flush_dcache_range(from, copy_bytes); 512 memcpy(to, from, copy_bytes); 513 514 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); 515 } 516 517 kunmap_atomic(to); 518 } 519 520 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 521 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 522 } else { 523 pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction); 524 } 525 526 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 527 cmd->se_cmd = NULL; 528 529 kmem_cache_free(tcmu_cmd_cache, cmd); 530 } 531 532 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 533 { 534 struct tcmu_mailbox *mb; 535 LIST_HEAD(cpl_cmds); 536 unsigned long flags; 537 int handled = 0; 538 539 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 540 pr_err("ring broken, not handling completions\n"); 541 return 0; 542 } 543 544 spin_lock_irqsave(&udev->cmdr_lock, flags); 545 546 mb = udev->mb_addr; 547 tcmu_flush_dcache_range(mb, sizeof(*mb)); 548 549 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { 550 551 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 552 struct tcmu_cmd *cmd; 553 554 tcmu_flush_dcache_range(entry, sizeof(*entry)); 555 556 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { 557 UPDATE_HEAD(udev->cmdr_last_cleaned, 558 tcmu_hdr_get_len(entry->hdr.len_op), 559 udev->cmdr_size); 560 continue; 561 } 562 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 563 564 spin_lock(&udev->commands_lock); 565 cmd = idr_find(&udev->commands, entry->hdr.cmd_id); 566 if (cmd) 567 idr_remove(&udev->commands, cmd->cmd_id); 568 spin_unlock(&udev->commands_lock); 569 570 if (!cmd) { 571 pr_err("cmd_id not found, ring is broken\n"); 572 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 573 break; 574 } 575 576 tcmu_handle_completion(cmd, entry); 577 578 UPDATE_HEAD(udev->cmdr_last_cleaned, 579 tcmu_hdr_get_len(entry->hdr.len_op), 580 udev->cmdr_size); 581 582 handled++; 583 } 584 585 if (mb->cmd_tail == mb->cmd_head) 586 del_timer(&udev->timeout); /* no more pending cmds */ 587 588 spin_unlock_irqrestore(&udev->cmdr_lock, flags); 589 590 wake_up(&udev->wait_cmdr); 591 592 return handled; 593 } 594 595 static int tcmu_check_expired_cmd(int id, void *p, void *data) 596 { 597 struct tcmu_cmd *cmd = p; 598 599 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 600 return 0; 601 602 if (!time_after(cmd->deadline, jiffies)) 603 return 0; 604 605 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 606 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); 607 cmd->se_cmd = NULL; 608 609 kmem_cache_free(tcmu_cmd_cache, cmd); 610 611 return 0; 612 } 613 614 static void tcmu_device_timedout(unsigned long data) 615 { 616 struct tcmu_dev *udev = (struct tcmu_dev *)data; 617 unsigned long flags; 618 int handled; 619 620 handled = tcmu_handle_completions(udev); 621 622 pr_warn("%d completions handled from timeout\n", handled); 623 624 spin_lock_irqsave(&udev->commands_lock, flags); 625 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 626 spin_unlock_irqrestore(&udev->commands_lock, flags); 627 628 /* 629 * We don't need to wakeup threads on wait_cmdr since they have their 630 * own timeout. 631 */ 632 } 633 634 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 635 { 636 struct tcmu_hba *tcmu_hba; 637 638 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 639 if (!tcmu_hba) 640 return -ENOMEM; 641 642 tcmu_hba->host_id = host_id; 643 hba->hba_ptr = tcmu_hba; 644 645 return 0; 646 } 647 648 static void tcmu_detach_hba(struct se_hba *hba) 649 { 650 kfree(hba->hba_ptr); 651 hba->hba_ptr = NULL; 652 } 653 654 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 655 { 656 struct tcmu_dev *udev; 657 658 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 659 if (!udev) 660 return NULL; 661 662 udev->name = kstrdup(name, GFP_KERNEL); 663 if (!udev->name) { 664 kfree(udev); 665 return NULL; 666 } 667 668 udev->hba = hba; 669 670 init_waitqueue_head(&udev->wait_cmdr); 671 spin_lock_init(&udev->cmdr_lock); 672 673 idr_init(&udev->commands); 674 spin_lock_init(&udev->commands_lock); 675 676 setup_timer(&udev->timeout, tcmu_device_timedout, 677 (unsigned long)udev); 678 679 return &udev->se_dev; 680 } 681 682 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 683 { 684 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); 685 686 tcmu_handle_completions(tcmu_dev); 687 688 return 0; 689 } 690 691 /* 692 * mmap code from uio.c. Copied here because we want to hook mmap() 693 * and this stuff must come along. 694 */ 695 static int tcmu_find_mem_index(struct vm_area_struct *vma) 696 { 697 struct tcmu_dev *udev = vma->vm_private_data; 698 struct uio_info *info = &udev->uio_info; 699 700 if (vma->vm_pgoff < MAX_UIO_MAPS) { 701 if (info->mem[vma->vm_pgoff].size == 0) 702 return -1; 703 return (int)vma->vm_pgoff; 704 } 705 return -1; 706 } 707 708 static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 709 { 710 struct tcmu_dev *udev = vma->vm_private_data; 711 struct uio_info *info = &udev->uio_info; 712 struct page *page; 713 unsigned long offset; 714 void *addr; 715 716 int mi = tcmu_find_mem_index(vma); 717 if (mi < 0) 718 return VM_FAULT_SIGBUS; 719 720 /* 721 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 722 * to use mem[N]. 723 */ 724 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 725 726 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 727 if (info->mem[mi].memtype == UIO_MEM_LOGICAL) 728 page = virt_to_page(addr); 729 else 730 page = vmalloc_to_page(addr); 731 get_page(page); 732 vmf->page = page; 733 return 0; 734 } 735 736 static const struct vm_operations_struct tcmu_vm_ops = { 737 .fault = tcmu_vma_fault, 738 }; 739 740 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 741 { 742 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 743 744 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 745 vma->vm_ops = &tcmu_vm_ops; 746 747 vma->vm_private_data = udev; 748 749 /* Ensure the mmap is exactly the right size */ 750 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 751 return -EINVAL; 752 753 return 0; 754 } 755 756 static int tcmu_open(struct uio_info *info, struct inode *inode) 757 { 758 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 759 760 /* O_EXCL not supported for char devs, so fake it? */ 761 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 762 return -EBUSY; 763 764 pr_debug("open\n"); 765 766 return 0; 767 } 768 769 static int tcmu_release(struct uio_info *info, struct inode *inode) 770 { 771 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 772 773 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 774 775 pr_debug("close\n"); 776 777 return 0; 778 } 779 780 static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) 781 { 782 struct sk_buff *skb; 783 void *msg_header; 784 int ret = -ENOMEM; 785 786 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 787 if (!skb) 788 return ret; 789 790 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 791 if (!msg_header) 792 goto free_skb; 793 794 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); 795 if (ret < 0) 796 goto free_skb; 797 798 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); 799 if (ret < 0) 800 goto free_skb; 801 802 genlmsg_end(skb, msg_header); 803 804 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, 805 TCMU_MCGRP_CONFIG, GFP_KERNEL); 806 807 /* We don't care if no one is listening */ 808 if (ret == -ESRCH) 809 ret = 0; 810 811 return ret; 812 free_skb: 813 nlmsg_free(skb); 814 return ret; 815 } 816 817 static int tcmu_configure_device(struct se_device *dev) 818 { 819 struct tcmu_dev *udev = TCMU_DEV(dev); 820 struct tcmu_hba *hba = udev->hba->hba_ptr; 821 struct uio_info *info; 822 struct tcmu_mailbox *mb; 823 size_t size; 824 size_t used; 825 int ret = 0; 826 char *str; 827 828 info = &udev->uio_info; 829 830 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 831 udev->dev_config); 832 size += 1; /* for \0 */ 833 str = kmalloc(size, GFP_KERNEL); 834 if (!str) 835 return -ENOMEM; 836 837 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); 838 839 if (udev->dev_config[0]) 840 snprintf(str + used, size - used, "/%s", udev->dev_config); 841 842 info->name = str; 843 844 udev->mb_addr = vzalloc(TCMU_RING_SIZE); 845 if (!udev->mb_addr) { 846 ret = -ENOMEM; 847 goto err_vzalloc; 848 } 849 850 /* mailbox fits in first part of CMDR space */ 851 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 852 udev->data_off = CMDR_SIZE; 853 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; 854 855 mb = udev->mb_addr; 856 mb->version = TCMU_MAILBOX_VERSION; 857 mb->cmdr_off = CMDR_OFF; 858 mb->cmdr_size = udev->cmdr_size; 859 860 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 861 WARN_ON(udev->data_size % PAGE_SIZE); 862 863 info->version = xstr(TCMU_MAILBOX_VERSION); 864 865 info->mem[0].name = "tcm-user command & data buffer"; 866 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 867 info->mem[0].size = TCMU_RING_SIZE; 868 info->mem[0].memtype = UIO_MEM_VIRTUAL; 869 870 info->irqcontrol = tcmu_irqcontrol; 871 info->irq = UIO_IRQ_CUSTOM; 872 873 info->mmap = tcmu_mmap; 874 info->open = tcmu_open; 875 info->release = tcmu_release; 876 877 ret = uio_register_device(tcmu_root_device, info); 878 if (ret) 879 goto err_register; 880 881 /* Other attributes can be configured in userspace */ 882 dev->dev_attrib.hw_block_size = 512; 883 dev->dev_attrib.hw_max_sectors = 128; 884 dev->dev_attrib.hw_queue_depth = 128; 885 886 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 887 udev->uio_info.uio_dev->minor); 888 if (ret) 889 goto err_netlink; 890 891 return 0; 892 893 err_netlink: 894 uio_unregister_device(&udev->uio_info); 895 err_register: 896 vfree(udev->mb_addr); 897 err_vzalloc: 898 kfree(info->name); 899 900 return ret; 901 } 902 903 static int tcmu_check_pending_cmd(int id, void *p, void *data) 904 { 905 struct tcmu_cmd *cmd = p; 906 907 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 908 return 0; 909 return -EINVAL; 910 } 911 912 static void tcmu_free_device(struct se_device *dev) 913 { 914 struct tcmu_dev *udev = TCMU_DEV(dev); 915 int i; 916 917 del_timer_sync(&udev->timeout); 918 919 vfree(udev->mb_addr); 920 921 /* Upper layer should drain all requests before calling this */ 922 spin_lock_irq(&udev->commands_lock); 923 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL); 924 idr_destroy(&udev->commands); 925 spin_unlock_irq(&udev->commands_lock); 926 WARN_ON(i); 927 928 /* Device was configured */ 929 if (udev->uio_info.uio_dev) { 930 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, 931 udev->uio_info.uio_dev->minor); 932 933 uio_unregister_device(&udev->uio_info); 934 kfree(udev->uio_info.name); 935 kfree(udev->name); 936 } 937 938 kfree(udev); 939 } 940 941 enum { 942 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, 943 }; 944 945 static match_table_t tokens = { 946 {Opt_dev_config, "dev_config=%s"}, 947 {Opt_dev_size, "dev_size=%u"}, 948 {Opt_hw_block_size, "hw_block_size=%u"}, 949 {Opt_err, NULL} 950 }; 951 952 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 953 const char *page, ssize_t count) 954 { 955 struct tcmu_dev *udev = TCMU_DEV(dev); 956 char *orig, *ptr, *opts, *arg_p; 957 substring_t args[MAX_OPT_ARGS]; 958 int ret = 0, token; 959 unsigned long tmp_ul; 960 961 opts = kstrdup(page, GFP_KERNEL); 962 if (!opts) 963 return -ENOMEM; 964 965 orig = opts; 966 967 while ((ptr = strsep(&opts, ",\n")) != NULL) { 968 if (!*ptr) 969 continue; 970 971 token = match_token(ptr, tokens, args); 972 switch (token) { 973 case Opt_dev_config: 974 if (match_strlcpy(udev->dev_config, &args[0], 975 TCMU_CONFIG_LEN) == 0) { 976 ret = -EINVAL; 977 break; 978 } 979 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 980 break; 981 case Opt_dev_size: 982 arg_p = match_strdup(&args[0]); 983 if (!arg_p) { 984 ret = -ENOMEM; 985 break; 986 } 987 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); 988 kfree(arg_p); 989 if (ret < 0) 990 pr_err("kstrtoul() failed for dev_size=\n"); 991 break; 992 case Opt_hw_block_size: 993 arg_p = match_strdup(&args[0]); 994 if (!arg_p) { 995 ret = -ENOMEM; 996 break; 997 } 998 ret = kstrtoul(arg_p, 0, &tmp_ul); 999 kfree(arg_p); 1000 if (ret < 0) { 1001 pr_err("kstrtoul() failed for hw_block_size=\n"); 1002 break; 1003 } 1004 if (!tmp_ul) { 1005 pr_err("hw_block_size must be nonzero\n"); 1006 break; 1007 } 1008 dev->dev_attrib.hw_block_size = tmp_ul; 1009 break; 1010 default: 1011 break; 1012 } 1013 } 1014 1015 kfree(orig); 1016 return (!ret) ? count : ret; 1017 } 1018 1019 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 1020 { 1021 struct tcmu_dev *udev = TCMU_DEV(dev); 1022 ssize_t bl = 0; 1023 1024 bl = sprintf(b + bl, "Config: %s ", 1025 udev->dev_config[0] ? udev->dev_config : "NULL"); 1026 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1027 1028 return bl; 1029 } 1030 1031 static sector_t tcmu_get_blocks(struct se_device *dev) 1032 { 1033 struct tcmu_dev *udev = TCMU_DEV(dev); 1034 1035 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 1036 dev->dev_attrib.block_size); 1037 } 1038 1039 static sense_reason_t 1040 tcmu_pass_op(struct se_cmd *se_cmd) 1041 { 1042 int ret = tcmu_queue_cmd(se_cmd); 1043 1044 if (ret != 0) 1045 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1046 else 1047 return TCM_NO_SENSE; 1048 } 1049 1050 static sense_reason_t 1051 tcmu_parse_cdb(struct se_cmd *cmd) 1052 { 1053 return passthrough_parse_cdb(cmd, tcmu_pass_op); 1054 } 1055 1056 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); 1057 TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); 1058 1059 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); 1060 TB_DEV_ATTR_RO(tcmu, hw_block_size); 1061 1062 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); 1063 TB_DEV_ATTR_RO(tcmu, hw_max_sectors); 1064 1065 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); 1066 TB_DEV_ATTR_RO(tcmu, hw_queue_depth); 1067 1068 static struct configfs_attribute *tcmu_backend_dev_attrs[] = { 1069 &tcmu_dev_attrib_hw_pi_prot_type.attr, 1070 &tcmu_dev_attrib_hw_block_size.attr, 1071 &tcmu_dev_attrib_hw_max_sectors.attr, 1072 &tcmu_dev_attrib_hw_queue_depth.attr, 1073 NULL, 1074 }; 1075 1076 static struct se_subsystem_api tcmu_template = { 1077 .name = "user", 1078 .inquiry_prod = "USER", 1079 .inquiry_rev = TCMU_VERSION, 1080 .owner = THIS_MODULE, 1081 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1082 .attach_hba = tcmu_attach_hba, 1083 .detach_hba = tcmu_detach_hba, 1084 .alloc_device = tcmu_alloc_device, 1085 .configure_device = tcmu_configure_device, 1086 .free_device = tcmu_free_device, 1087 .parse_cdb = tcmu_parse_cdb, 1088 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 1089 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1090 .get_device_type = sbc_get_device_type, 1091 .get_blocks = tcmu_get_blocks, 1092 }; 1093 1094 static int __init tcmu_module_init(void) 1095 { 1096 struct target_backend_cits *tbc = &tcmu_template.tb_cits; 1097 int ret; 1098 1099 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1100 1101 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 1102 sizeof(struct tcmu_cmd), 1103 __alignof__(struct tcmu_cmd), 1104 0, NULL); 1105 if (!tcmu_cmd_cache) 1106 return -ENOMEM; 1107 1108 tcmu_root_device = root_device_register("tcm_user"); 1109 if (IS_ERR(tcmu_root_device)) { 1110 ret = PTR_ERR(tcmu_root_device); 1111 goto out_free_cache; 1112 } 1113 1114 ret = genl_register_family(&tcmu_genl_family); 1115 if (ret < 0) { 1116 goto out_unreg_device; 1117 } 1118 1119 target_core_setup_sub_cits(&tcmu_template); 1120 tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs; 1121 1122 ret = transport_subsystem_register(&tcmu_template); 1123 if (ret) 1124 goto out_unreg_genl; 1125 1126 return 0; 1127 1128 out_unreg_genl: 1129 genl_unregister_family(&tcmu_genl_family); 1130 out_unreg_device: 1131 root_device_unregister(tcmu_root_device); 1132 out_free_cache: 1133 kmem_cache_destroy(tcmu_cmd_cache); 1134 1135 return ret; 1136 } 1137 1138 static void __exit tcmu_module_exit(void) 1139 { 1140 transport_subsystem_release(&tcmu_template); 1141 genl_unregister_family(&tcmu_genl_family); 1142 root_device_unregister(tcmu_root_device); 1143 kmem_cache_destroy(tcmu_cmd_cache); 1144 } 1145 1146 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 1147 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 1148 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 1149 MODULE_LICENSE("GPL"); 1150 1151 module_init(tcmu_module_init); 1152 module_exit(tcmu_module_exit); 1153