// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
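		/*
		 * Any LUN other than 0 with no active mapping is a hard
		 * error; only LUN 0 gets the read-only virtual LUN 0
		 * fallback below.
		 */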
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx from %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
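/*
 * The returned se_dev_entry holds an extra pr_kref reference; callers
 * are expected to drop it via kref_put(&deve->pr_kref,
 * target_pr_kref_release) once they are done with the entry.
 */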
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off the RCU callback, wait for any in-process
	 * SPEC_I_PT=1 or REGISTER_AND_MOVE PR operation to complete.
	 */
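	/*
	 * kref_init() set pr_kref to 1, so this put completes pr_comp
	 * only once any extra references taken via
	 * core_get_se_deve_from_rtpi() have been dropped as well.
	 */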
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device. Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code         Description
	 * 0h           Reserved
	 * 1h           Relative port 1, historically known as port A
	 * 2h           Relative port 2, historically known as port B
	 * 3h to FFFFh  Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
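	/*
	 * e.g. with 512-byte blocks on a 4 KiB page kernel the alignment
	 * is 8 sectors, so max_sectors = 1005 is rounded down to 1000.
	 */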
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation
	 * APTPL pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

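/*
 * Reverse of core_dev_add_initiator_node_lun_acl(): look up the
 * se_dev_entry for lacl->mapped_lun under lun_entry_mutex and tear
 * the mapping down via core_disable_device_list_for_node().
 */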
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug(" Type: %s\n", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
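	/*
	 * The attribute defaults below come from the DA_* constants in
	 * target_core_base.h and may be overridden via the device's
	 * configfs attrib/ group before it is configured.
	 */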
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * discard (QUEUE_FLAG_DISCARD), used for UNMAP/WRITE SAME in SCSI and
 * TRIM in ATA; if so, derive the unmap attributes from the queue
 * limits and report TPE=1.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
		block_size;
	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
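/*
 * e.g. for a device advertising 4096-byte blocks, LBA 16 maps to
 * Linux sector 128 (16 << 3); a 512-byte block size passes through
 * unchanged.
 */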
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. A non-zero return
 * value will break from the loop and be returned to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_device already configured for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add the device to the idr early so backend modules like tcmu
	 * can look it up during their configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

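	/*
	 * The backend's configure_device() callback opens the underlying
	 * storage and is expected to fill in the hw_* attributes
	 * (hw_block_size, hw_max_sectors, hw_queue_depth) used below.
	 */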
	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point in having two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Set up the workqueue for handling QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
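/*
 * Note: passthrough backends (e.g. pscsi and tcmu) call this from their
 * parse_cdb() method so that only the opcodes the target core must
 * emulate are intercepted here.
 */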
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for
	 * everything else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);