// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <linux/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		this_cpu_inc(deve->stats->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			this_cpu_add(deve->stats->write_bytes,
				     se_cmd->data_length);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			this_cpu_add(deve->stats->read_bytes,
				     se_cmd->data_length);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
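		 *
		 * Virtual LUN 0 is backed by the internal rd_mcp device set up
		 * in core_dev_setup_virtual_lun0() below, and is forced
		 * write-protected by the DMA direction check that follows.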
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	this_cpu_inc(se_cmd->se_dev->stats->total_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		this_cpu_add(se_cmd->se_dev->stats->write_bytes,
			     se_cmd->data_length);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		this_cpu_add(se_cmd->se_dev->stats->read_bytes,
			     se_cmd->data_length);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
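 *
 * The caller is expected to drop that reference again with
 * kref_put(&deve->pr_kref, target_pr_kref_release) once it is done with the
 * returned se_dev_entry.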
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;
	int ret = 0;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	new->stats = alloc_percpu(struct se_dev_entry_io_stats);
	if (!new->stats) {
		ret = -ENOMEM;
		goto free_deve;
	}

	spin_lock_init(&new->ua_lock);
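	/*
	 * The new entry is still private at this point; it only becomes
	 * visible to RCU readers once hlist_add_head_rcu() publishes it below.
	 */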
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			ret = -EINVAL;
			goto free_stats;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			ret = -EINVAL;
			goto free_stats;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;

free_stats:
	free_percpu(new->stats);
free_deve:
	kfree(new);
	return ret;
}

static void target_free_dev_entry(struct rcu_head *head)
{
	struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
						 rcu_head);
	free_percpu(deve->stats);
	kfree(deve);
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
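	 *
	 * The se_dev_entry itself is only freed via call_rcu() after any
	 * in-flight SPEC_I_PT / REGISTER_AND_MOVE holders of pr_kref have
	 * dropped their references, see the kref_put() + wait_for_completion()
	 * pairing below.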
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	call_rcu(&orig->rcu_head, target_free_dev_entry);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
				 &dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
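	 *
	 * Dynamic NodeACLs are created at session establishment time, so a
	 * LUN added afterwards has to be mapped into each existing dynamic
	 * ACL here.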
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
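	 *
	 * APTPL registrations restored before this MappedLUN existed can only
	 * be activated once the NodeACL to LUN mapping is in place, which is
	 * what happens below.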
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
}

static void target_non_ordered_release(struct percpu_ref *ref)
{
	struct se_device *dev = container_of(ref, struct se_device,
					     non_ordered);
	unsigned long flags;

	spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
	if (!list_empty(&dev->delayed_cmd_list))
		schedule_work(&dev->delayed_cmd_work);
	spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->stats = alloc_percpu(struct se_dev_io_stats);
	if (!dev->stats)
		goto free_device;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues)
		goto free_stats;

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto free_queues;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
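	/*
	 * qf_cmd_list holds commands that hit a fabric QUEUE FULL condition
	 * and are retried later from target_qf_do_work(), which is wired up
	 * in target_configure_device().
	 */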
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;

free_queues:
	kfree(dev->queues);
free_stats:
	free_percpu(dev->stats);
free_device:
	hba->backend->ops->free_device(dev);
	return NULL;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
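 *
 * Typically invoked from a backend's ->configure_unmap() callback (e.g. the
 * iblock backend) with the se_dev_attrib of the device being configured.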
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
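	 *
	 * The returned id becomes dev->dev_index below and is what the error
	 * paths and target_free_device() use to remove the entry again.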
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	percpu_ref_exit(&dev->non_ordered);
	cancel_work_sync(&dev->delayed_cmd_work);

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	free_percpu(dev->stats);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
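	/*
	 * Tear down the internal rd_mcp HBA that was allocated in
	 * core_dev_setup_virtual_lun0().
	 */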
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
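
/*
 * Usage sketch (illustration only, not part of this file): a passthrough
 * backend typically wires this helper up from its parse_cdb callback, in the
 * same spirit as the pscsi and tcmu backends, e.g.:
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, foo_execute_cmd);
 *	}
 *
 * where foo_parse_cdb() and foo_execute_cmd() are hypothetical stand-ins for
 * the backend's own callbacks.
 */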