// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <linux/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		this_cpu_inc(deve->stats->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			this_cpu_add(deve->stats->write_bytes,
				     se_cmd->data_length);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			this_cpu_add(deve->stats->read_bytes,
				     se_cmd->data_length);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	this_cpu_inc(se_cmd->se_dev->stats->total_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		this_cpu_add(se_cmd->se_dev->stats->write_bytes,
			     se_cmd->data_length);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		this_cpu_add(se_cmd->se_dev->stats->read_bytes,
			     se_cmd->data_length);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

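/*
 * TMR (task management request) variant of the LUN lookup above: resolve
 * se_cmd->orig_fe_lun to a mapped se_dev_entry, take a lun_ref, and record
 * the backing device in both se_cmd and the se_tmr_req.  Unlike the command
 * path there is no virtual LUN 0 fallback and no per-LUN I/O accounting.
 */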
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

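/*
 * Tear down every se_dev_entry mapped for a NodeACL, typically while the
 * NodeACL itself is being released.
 */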
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

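/*
 * Install a new se_dev_entry mapping a NodeACL's mapped_lun onto a se_lun.
 * If a dynamic (demo-mode) entry already exists for the same LUN it is
 * replaced via RCU and released, completing the dynamic -> explicit
 * MappedLUN conversion; a REPORTED LUNS DATA HAS CHANGED UA is raised on
 * the NodeACL's other entries.
 */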
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;
	int ret = 0;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	new->stats = alloc_percpu(struct se_dev_entry_io_stats);
	if (!new->stats) {
		ret = -ENOMEM;
		goto free_deve;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			ret = -EINVAL;
			goto free_stats;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			ret = -EINVAL;
			goto free_stats;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;

free_stats:
	free_percpu(new->stats);
free_deve:
	kfree(new);
	return ret;
}

static void target_free_dev_entry(struct rcu_head *head)
{
	struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
						 rcu_head);
	free_percpu(deve->stats);
	kfree(deve);
}

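/*
 * Remove an existing se_dev_entry mapping for a NodeACL and release its
 * UA, PR and RCU resources.  Caller must hold nacl->lun_entry_mutex.
 */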
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	call_rcu(&orig->rcu_head, target_free_dev_entry);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

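/*
 * Activate a LUN for @dev within @tpg and, when the fabric runs in
 * demo-mode, map the new LUN into the LUN tables of any dynamically
 * generated NodeACLs.
 */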
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

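/*
 * Wire a configfs-created se_lun_acl to @lun: the mapping inherits the
 * LUN's read-only flag, is published via core_enable_device_list_for_node(),
 * and any matching APTPL PR pre-registrations are then activated.
 */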
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

static void target_non_ordered_release(struct percpu_ref *ref)
{
	struct se_device *dev = container_of(ref, struct se_device,
					     non_ordered);
	unsigned long flags;

	spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
	if (!list_empty(&dev->delayed_cmd_list))
		schedule_work(&dev->delayed_cmd_work);
	spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
}

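/*
 * Allocate and pre-initialize a se_device via the backend's alloc_device()
 * op: per-cpu I/O stats, per-cpu submission queues, the non_ordered percpu
 * ref, default dev_attrib values, the internal xcopy_lun, and the default
 * INQUIRY strings.  The device is not yet visible to initiators; see
 * target_configure_device().
 */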
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->stats = alloc_percpu(struct se_dev_io_stats);
	if (!dev->stats)
		goto free_device;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues)
		goto free_stats;

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto free_queues;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

	/* Skip allocating lun_stats since we can't export them. */
	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;

free_queues:
	kfree(dev->queues);
free_stats:
	free_percpu(dev->stats);
free_device:
	hba->backend->ops->free_device(dev);
	return NULL;
}

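/*
 * Derive the atomic write limits exposed to initiators from the backing
 * block device's atomic write queue limits, converted from bytes to
 * logical blocks.  The attributes are left untouched when the bdev cannot
 * do atomic writes.
 */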
void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
					     struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_can_atomic_write(bdev))
		return;

	attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
	attrib->atomic_granularity = attrib->atomic_alignment =
		queue_atomic_write_unit_min_bytes(q) / block_size;
	attrib->atomic_max_with_boundary = 0;
	attrib->atomic_max_boundary = 0;
}
EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
				      struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_bdev);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

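/*
 * idr_for_each() callback for target_for_each_device().  Skips devices
 * that are not yet configured, pins the device's config_item, and drops
 * device_mutex while the user callback runs.
 */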
static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}

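/*
 * Bring an allocated se_device online: assign a dev_index from devices_idr,
 * invoke the backend's configure_device() and optional configure_unmap()
 * ops, derive block_size/queue_depth/hw_max_sectors limits, set up ALUA and
 * the QUEUE_FULL worker, and finally mark the device DF_CONFIGURED.
 */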
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
		       " object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	percpu_ref_exit(&dev->non_ordered);
	cancel_work_sync(&dev->delayed_cmd_work);

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	free_percpu(dev->stats);
	dev->transport->free_device(dev);
}

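/*
 * Create the global rd_mcp backed "virt_lun0" device used as the fallback
 * virtual LUN 0 (see transport_lookup_cmd_lun()) when an initiator has no
 * MappedLUN 0 of its own.  Torn down by core_dev_release_virtual_lun0().
 */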
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
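
/*
 * Illustrative sketch (not from this file): a passthrough backend is
 * expected to wire its parse_cdb op to the helper above, passing its own
 * execute callback.  "foo" below is a hypothetical backend name.
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, foo_execute_cmd);
 *	}
 */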