/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
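
/*
 * Editorial note on the reference counting above (descriptive only): on
 * success the command holds a reference on se_lun->lun_ref, taken via
 * percpu_ref_tryget_live() or percpu_ref_get(), and records that fact in
 * se_cmd->lun_ref_active.  The matching percpu_ref_put() is expected to
 * happen when the command is released in target_core_transport.c once I/O
 * completes, which is what allows an se_lun to be torn down only after all
 * in-flight commands referencing it have drained.
 */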

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}
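
/*
 * Descriptive note: the enable path above and the disable path below follow
 * the usual RCU replace-then-retire pattern for a struct se_dev_entry:
 * publish the new entry with rcu_assign_pointer()/hlist_add_head_rcu(),
 * unhook the old one with hlist_del_rcu(), drop the initial pr_kref and
 * wait on pr_comp so any in-flight PR code that took a reference via
 * core_get_se_deve_from_rtpi() has finished, and only then hand the old
 * entry to kfree_rcu().  Readers such as transport_lookup_cmd_lun() only
 * ever observe a fully initialized entry under rcu_read_lock().
 */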

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
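
/*
 * Illustrative example for se_dev_align_max_sectors() below (values assumed,
 * not taken from a real backend): with a 512-byte block_size and a 4 KiB
 * PAGE_SIZE, alignment is 8 blocks, so a hardware max_sectors of 1023 is
 * rounded down to 1016.  With block_size >= PAGE_SIZE the alignment
 * degenerates to 1 and max_sectors is returned unchanged.
 */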

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * discard (QUEUE_FLAG_DISCARD), used for UNMAP/WRITE SAME in SCSI and
 * TRIM in ATA, in which case we need to set TPE=1.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
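
/*
 * Worked example for the conversion above (illustrative numbers only): on a
 * device advertising 4096-byte blocks, logical block 10 maps to 512-byte
 * sector 10 << 3 == 80; with 512-byte blocks the LBA is already in
 * block-layer units and is returned unchanged.
 */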

/**
 * target_find_device - find a se_device by its dev_index
 * @id: dev_index
 * @do_depend: true if caller needs target_depend_item to be done
 *
 * If do_depend is true, the caller must do a target_undepend_item
 * when finished using the device.
 *
 * If do_depend is false, the caller must be calling from a configfs
 * callback or during removal.
 */
struct se_device *target_find_device(int id, bool do_depend)
{
	struct se_device *dev;

	mutex_lock(&device_mutex);
	dev = idr_find(&devices_idr, id);
	if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
		dev = NULL;
	mutex_unlock(&device_mutex);
	return dev;
}
EXPORT_SYMBOL(target_find_device);

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!(dev->dev_flags & DF_CONFIGURED))
		return 0;

	return iter->fn(dev, iter->data);
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter;
	int ret;

	iter.fn = fn;
	iter.data = data;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}
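
/*
 * Usage sketch for target_for_each_device() (hypothetical caller, names
 * invented for illustration):
 *
 *	static int count_devs(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;	// 0 == keep iterating
 *	}
 *
 *	unsigned int count = 0;
 *	target_for_each_device(count_devs, &count);
 *
 * Returning non-zero from the callback stops the walk and propagates that
 * value back to the caller, per the kernel-doc above.
 */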

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add the device to the IDR early so that backends like tcmu
	 * can use it during their configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Start up the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
		       dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response;
	 * everything else is passed through.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);