1 /******************************************************************************* 2 * Filename: target_core_tpg.c 3 * 4 * This file contains generic Target Portal Group related functions. 5 * 6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 8 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * 11 * Nicholas A. Bellinger <nab@kernel.org> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * 27 ******************************************************************************/ 28 29 #include <linux/net.h> 30 #include <linux/string.h> 31 #include <linux/timer.h> 32 #include <linux/slab.h> 33 #include <linux/spinlock.h> 34 #include <linux/in.h> 35 #include <net/sock.h> 36 #include <net/tcp.h> 37 #include <scsi/scsi.h> 38 #include <scsi/scsi_cmnd.h> 39 40 #include <target/target_core_base.h> 41 #include <target/target_core_device.h> 42 #include <target/target_core_tpg.h> 43 #include <target/target_core_transport.h> 44 #include <target/target_core_fabric_ops.h> 45 46 #include "target_core_hba.h" 47 #include "target_core_stat.h" 48 49 extern struct se_device *g_lun0_dev; 50 51 static DEFINE_SPINLOCK(tpg_lock); 52 static LIST_HEAD(tpg_list); 53 54 /* core_clear_initiator_node_from_tpg(): 55 * 56 * 57 */ 58 static void core_clear_initiator_node_from_tpg( 59 struct se_node_acl *nacl, 60 struct se_portal_group *tpg) 61 { 62 int i; 63 struct se_dev_entry *deve; 64 struct se_lun *lun; 65 struct se_lun_acl *acl, *acl_tmp; 66 67 spin_lock_irq(&nacl->device_list_lock); 68 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 69 deve = &nacl->device_list[i]; 70 71 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 72 continue; 73 74 if (!deve->se_lun) { 75 pr_err("%s device entries device pointer is" 76 " NULL, but Initiator has access.\n", 77 tpg->se_tpg_tfo->get_fabric_name()); 78 continue; 79 } 80 81 lun = deve->se_lun; 82 spin_unlock_irq(&nacl->device_list_lock); 83 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 84 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 85 86 spin_lock(&lun->lun_acl_lock); 87 list_for_each_entry_safe(acl, acl_tmp, 88 &lun->lun_acl_list, lacl_list) { 89 if (!strcmp(acl->initiatorname, nacl->initiatorname) && 90 (acl->mapped_lun == deve->mapped_lun)) 91 break; 92 } 93 94 if (!acl) { 95 pr_err("Unable to locate struct se_lun_acl for %s," 96 " mapped_lun: %u\n", nacl->initiatorname, 97 deve->mapped_lun); 98 spin_unlock(&lun->lun_acl_lock); 99 spin_lock_irq(&nacl->device_list_lock); 100 continue; 101 } 102 103 list_del(&acl->lacl_list); 104 spin_unlock(&lun->lun_acl_lock); 105 106 spin_lock_irq(&nacl->device_list_lock); 107 kfree(acl); 108 } 109 spin_unlock_irq(&nacl->device_list_lock); 110 } 111 112 /* __core_tpg_get_initiator_node_acl(): 113 * 114 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling 115 */ 116 struct se_node_acl *__core_tpg_get_initiator_node_acl( 117 struct se_portal_group *tpg, 118 const char *initiatorname) 119 { 120 struct se_node_acl *acl; 121 122 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 123 if (!strcmp(acl->initiatorname, initiatorname)) 124 return acl; 125 } 126 127 return NULL; 128 } 129 130 /* core_tpg_get_initiator_node_acl(): 131 * 132 * 133 */ 134 struct se_node_acl *core_tpg_get_initiator_node_acl( 135 struct se_portal_group *tpg, 136 unsigned char *initiatorname) 137 { 138 struct se_node_acl *acl; 139 140 spin_lock_bh(&tpg->acl_node_lock); 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 142 if (!strcmp(acl->initiatorname, initiatorname) && 143 !acl->dynamic_node_acl) { 144 spin_unlock_bh(&tpg->acl_node_lock); 145 return acl; 146 } 147 } 148 spin_unlock_bh(&tpg->acl_node_lock); 149 150 return NULL; 151 } 152 153 /* core_tpg_add_node_to_devs(): 154 * 155 * 156 */ 157 void core_tpg_add_node_to_devs( 158 struct se_node_acl *acl, 159 struct se_portal_group *tpg) 160 { 161 int i = 0; 162 u32 lun_access = 0; 163 struct se_lun *lun; 164 struct se_device *dev; 165 166 spin_lock(&tpg->tpg_lun_lock); 167 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 168 lun = &tpg->tpg_lun_list[i]; 169 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) 170 continue; 171 172 spin_unlock(&tpg->tpg_lun_lock); 173 174 dev = lun->lun_se_dev; 175 /* 176 * By default in LIO-Target $FABRIC_MOD, 177 * demo_mode_write_protect is ON, or READ_ONLY; 178 */ 179 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 180 if (dev->dev_flags & DF_READ_ONLY) 181 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 182 else 183 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 184 } else { 185 /* 186 * Allow only optical drives to issue R/W in default RO 187 * demo mode. 188 */ 189 if (dev->transport->get_device_type(dev) == TYPE_DISK) 190 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 191 else 192 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 193 } 194 195 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 196 " access for LUN in Demo Mode\n", 197 tpg->se_tpg_tfo->get_fabric_name(), 198 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 199 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 200 "READ-WRITE" : "READ-ONLY"); 201 202 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, 203 lun_access, acl, tpg, 1); 204 spin_lock(&tpg->tpg_lun_lock); 205 } 206 spin_unlock(&tpg->tpg_lun_lock); 207 } 208 209 /* core_set_queue_depth_for_node(): 210 * 211 * 212 */ 213 static int core_set_queue_depth_for_node( 214 struct se_portal_group *tpg, 215 struct se_node_acl *acl) 216 { 217 if (!acl->queue_depth) { 218 pr_err("Queue depth for %s Initiator Node: %s is 0," 219 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 220 acl->initiatorname); 221 acl->queue_depth = 1; 222 } 223 224 return 0; 225 } 226 227 /* core_create_device_list_for_node(): 228 * 229 * 230 */ 231 static int core_create_device_list_for_node(struct se_node_acl *nacl) 232 { 233 struct se_dev_entry *deve; 234 int i; 235 236 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * 237 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); 238 if (!nacl->device_list) { 239 pr_err("Unable to allocate memory for" 240 " struct se_node_acl->device_list\n"); 241 return -ENOMEM; 242 } 243 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 244 deve = &nacl->device_list[i]; 245 246 atomic_set(&deve->ua_count, 0); 247 atomic_set(&deve->pr_ref_count, 0); 248 spin_lock_init(&deve->ua_lock); 249 INIT_LIST_HEAD(&deve->alua_port_list); 250 INIT_LIST_HEAD(&deve->ua_list); 251 } 252 253 return 0; 254 } 255 256 /* core_tpg_check_initiator_node_acl() 257 * 258 * 259 */ 260 struct se_node_acl *core_tpg_check_initiator_node_acl( 261 struct se_portal_group *tpg, 262 unsigned char *initiatorname) 263 { 264 struct se_node_acl *acl; 265 266 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 267 if (acl) 268 return acl; 269 270 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) 271 return NULL; 272 273 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); 274 if (!acl) 275 return NULL; 276 277 INIT_LIST_HEAD(&acl->acl_list); 278 INIT_LIST_HEAD(&acl->acl_sess_list); 279 spin_lock_init(&acl->device_list_lock); 280 spin_lock_init(&acl->nacl_sess_lock); 281 atomic_set(&acl->acl_pr_ref_count, 0); 282 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); 283 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 284 acl->se_tpg = tpg; 285 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 286 spin_lock_init(&acl->stats_lock); 287 acl->dynamic_node_acl = 1; 288 289 tpg->se_tpg_tfo->set_default_node_attributes(acl); 290 291 if (core_create_device_list_for_node(acl) < 0) { 292 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 293 return NULL; 294 } 295 296 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 297 core_free_device_list_for_node(acl, tpg); 298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 299 return NULL; 300 } 301 302 core_tpg_add_node_to_devs(acl, tpg); 303 304 spin_lock_bh(&tpg->acl_node_lock); 305 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 306 tpg->num_node_acls++; 307 spin_unlock_bh(&tpg->acl_node_lock); 308 309 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 310 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 311 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 312 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 313 314 return acl; 315 } 316 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); 317 318 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) 319 { 320 while (atomic_read(&nacl->acl_pr_ref_count) != 0) 321 cpu_relax(); 322 } 323 324 void core_tpg_clear_object_luns(struct se_portal_group *tpg) 325 { 326 int i, ret; 327 struct se_lun *lun; 328 329 spin_lock(&tpg->tpg_lun_lock); 330 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 331 lun = &tpg->tpg_lun_list[i]; 332 333 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || 334 (lun->lun_se_dev == NULL)) 335 continue; 336 337 spin_unlock(&tpg->tpg_lun_lock); 338 ret = core_dev_del_lun(tpg, lun->unpacked_lun); 339 spin_lock(&tpg->tpg_lun_lock); 340 } 341 spin_unlock(&tpg->tpg_lun_lock); 342 } 343 EXPORT_SYMBOL(core_tpg_clear_object_luns); 344 345 /* core_tpg_add_initiator_node_acl(): 346 * 347 * 348 */ 349 struct se_node_acl *core_tpg_add_initiator_node_acl( 350 struct se_portal_group *tpg, 351 struct se_node_acl *se_nacl, 352 const char *initiatorname, 353 u32 queue_depth) 354 { 355 struct se_node_acl *acl = NULL; 356 357 spin_lock_bh(&tpg->acl_node_lock); 358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 359 if (acl) { 360 if (acl->dynamic_node_acl) { 361 acl->dynamic_node_acl = 0; 362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 365 spin_unlock_bh(&tpg->acl_node_lock); 366 /* 367 * Release the locally allocated struct se_node_acl 368 * because * core_tpg_add_initiator_node_acl() returned 369 * a pointer to an existing demo mode node ACL. 370 */ 371 if (se_nacl) 372 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, 373 se_nacl); 374 goto done; 375 } 376 377 pr_err("ACL entry for %s Initiator" 378 " Node %s already exists for TPG %u, ignoring" 379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 381 spin_unlock_bh(&tpg->acl_node_lock); 382 return ERR_PTR(-EEXIST); 383 } 384 spin_unlock_bh(&tpg->acl_node_lock); 385 386 if (!se_nacl) { 387 pr_err("struct se_node_acl pointer is NULL\n"); 388 return ERR_PTR(-EINVAL); 389 } 390 /* 391 * For v4.x logic the se_node_acl_s is hanging off a fabric 392 * dependent structure allocated via 393 * struct target_core_fabric_ops->fabric_make_nodeacl() 394 */ 395 acl = se_nacl; 396 397 INIT_LIST_HEAD(&acl->acl_list); 398 INIT_LIST_HEAD(&acl->acl_sess_list); 399 spin_lock_init(&acl->device_list_lock); 400 spin_lock_init(&acl->nacl_sess_lock); 401 atomic_set(&acl->acl_pr_ref_count, 0); 402 acl->queue_depth = queue_depth; 403 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 404 acl->se_tpg = tpg; 405 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 406 spin_lock_init(&acl->stats_lock); 407 408 tpg->se_tpg_tfo->set_default_node_attributes(acl); 409 410 if (core_create_device_list_for_node(acl) < 0) { 411 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 412 return ERR_PTR(-ENOMEM); 413 } 414 415 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 416 core_free_device_list_for_node(acl, tpg); 417 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 418 return ERR_PTR(-EINVAL); 419 } 420 421 spin_lock_bh(&tpg->acl_node_lock); 422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 423 tpg->num_node_acls++; 424 spin_unlock_bh(&tpg->acl_node_lock); 425 426 done: 427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 428 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 429 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 430 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 431 432 return acl; 433 } 434 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl); 435 436 /* core_tpg_del_initiator_node_acl(): 437 * 438 * 439 */ 440 int core_tpg_del_initiator_node_acl( 441 struct se_portal_group *tpg, 442 struct se_node_acl *acl, 443 int force) 444 { 445 struct se_session *sess, *sess_tmp; 446 int dynamic_acl = 0; 447 448 spin_lock_bh(&tpg->acl_node_lock); 449 if (acl->dynamic_node_acl) { 450 acl->dynamic_node_acl = 0; 451 dynamic_acl = 1; 452 } 453 list_del(&acl->acl_list); 454 tpg->num_node_acls--; 455 spin_unlock_bh(&tpg->acl_node_lock); 456 457 spin_lock_bh(&tpg->session_lock); 458 list_for_each_entry_safe(sess, sess_tmp, 459 &tpg->tpg_sess_list, sess_list) { 460 if (sess->se_node_acl != acl) 461 continue; 462 /* 463 * Determine if the session needs to be closed by our context. 464 */ 465 if (!tpg->se_tpg_tfo->shutdown_session(sess)) 466 continue; 467 468 spin_unlock_bh(&tpg->session_lock); 469 /* 470 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 471 * forcefully shutdown the $FABRIC_MOD session/nexus. 472 */ 473 tpg->se_tpg_tfo->close_session(sess); 474 475 spin_lock_bh(&tpg->session_lock); 476 } 477 spin_unlock_bh(&tpg->session_lock); 478 479 core_tpg_wait_for_nacl_pr_ref(acl); 480 core_clear_initiator_node_from_tpg(acl, tpg); 481 core_free_device_list_for_node(acl, tpg); 482 483 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 484 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 485 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 486 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 487 488 return 0; 489 } 490 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl); 491 492 /* core_tpg_set_initiator_node_queue_depth(): 493 * 494 * 495 */ 496 int core_tpg_set_initiator_node_queue_depth( 497 struct se_portal_group *tpg, 498 unsigned char *initiatorname, 499 u32 queue_depth, 500 int force) 501 { 502 struct se_session *sess, *init_sess = NULL; 503 struct se_node_acl *acl; 504 int dynamic_acl = 0; 505 506 spin_lock_bh(&tpg->acl_node_lock); 507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 508 if (!acl) { 509 pr_err("Access Control List entry for %s Initiator" 510 " Node %s does not exists for TPG %hu, ignoring" 511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 513 spin_unlock_bh(&tpg->acl_node_lock); 514 return -ENODEV; 515 } 516 if (acl->dynamic_node_acl) { 517 acl->dynamic_node_acl = 0; 518 dynamic_acl = 1; 519 } 520 spin_unlock_bh(&tpg->acl_node_lock); 521 522 spin_lock_bh(&tpg->session_lock); 523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 524 if (sess->se_node_acl != acl) 525 continue; 526 527 if (!force) { 528 pr_err("Unable to change queue depth for %s" 529 " Initiator Node: %s while session is" 530 " operational. To forcefully change the queue" 531 " depth and force session reinstatement" 532 " use the \"force=1\" parameter.\n", 533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 534 spin_unlock_bh(&tpg->session_lock); 535 536 spin_lock_bh(&tpg->acl_node_lock); 537 if (dynamic_acl) 538 acl->dynamic_node_acl = 1; 539 spin_unlock_bh(&tpg->acl_node_lock); 540 return -EEXIST; 541 } 542 /* 543 * Determine if the session needs to be closed by our context. 544 */ 545 if (!tpg->se_tpg_tfo->shutdown_session(sess)) 546 continue; 547 548 init_sess = sess; 549 break; 550 } 551 552 /* 553 * User has requested to change the queue depth for a Initiator Node. 554 * Change the value in the Node's struct se_node_acl, and call 555 * core_set_queue_depth_for_node() to add the requested queue depth. 556 * 557 * Finally call tpg->se_tpg_tfo->close_session() to force session 558 * reinstatement to occur if there is an active session for the 559 * $FABRIC_MOD Initiator Node in question. 560 */ 561 acl->queue_depth = queue_depth; 562 563 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 564 spin_unlock_bh(&tpg->session_lock); 565 /* 566 * Force session reinstatement if 567 * core_set_queue_depth_for_node() failed, because we assume 568 * the $FABRIC_MOD has already the set session reinstatement 569 * bit from tpg->se_tpg_tfo->shutdown_session() called above. 570 */ 571 if (init_sess) 572 tpg->se_tpg_tfo->close_session(init_sess); 573 574 spin_lock_bh(&tpg->acl_node_lock); 575 if (dynamic_acl) 576 acl->dynamic_node_acl = 1; 577 spin_unlock_bh(&tpg->acl_node_lock); 578 return -EINVAL; 579 } 580 spin_unlock_bh(&tpg->session_lock); 581 /* 582 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 583 * forcefully shutdown the $FABRIC_MOD session/nexus. 584 */ 585 if (init_sess) 586 tpg->se_tpg_tfo->close_session(init_sess); 587 588 pr_debug("Successfuly changed queue depth to: %d for Initiator" 589 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 592 593 spin_lock_bh(&tpg->acl_node_lock); 594 if (dynamic_acl) 595 acl->dynamic_node_acl = 1; 596 spin_unlock_bh(&tpg->acl_node_lock); 597 598 return 0; 599 } 600 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); 601 602 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 603 { 604 /* Set in core_dev_setup_virtual_lun0() */ 605 struct se_device *dev = g_lun0_dev; 606 struct se_lun *lun = &se_tpg->tpg_virt_lun0; 607 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 608 int ret; 609 610 lun->unpacked_lun = 0; 611 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 612 atomic_set(&lun->lun_acl_count, 0); 613 init_completion(&lun->lun_shutdown_comp); 614 INIT_LIST_HEAD(&lun->lun_acl_list); 615 INIT_LIST_HEAD(&lun->lun_cmd_list); 616 spin_lock_init(&lun->lun_acl_lock); 617 spin_lock_init(&lun->lun_cmd_lock); 618 spin_lock_init(&lun->lun_sep_lock); 619 620 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 621 if (ret < 0) 622 return ret; 623 624 return 0; 625 } 626 627 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg) 628 { 629 struct se_lun *lun = &se_tpg->tpg_virt_lun0; 630 631 core_tpg_post_dellun(se_tpg, lun); 632 } 633 634 int core_tpg_register( 635 struct target_core_fabric_ops *tfo, 636 struct se_wwn *se_wwn, 637 struct se_portal_group *se_tpg, 638 void *tpg_fabric_ptr, 639 int se_tpg_type) 640 { 641 struct se_lun *lun; 642 u32 i; 643 644 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * 645 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); 646 if (!se_tpg->tpg_lun_list) { 647 pr_err("Unable to allocate struct se_portal_group->" 648 "tpg_lun_list\n"); 649 return -ENOMEM; 650 } 651 652 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 653 lun = &se_tpg->tpg_lun_list[i]; 654 lun->unpacked_lun = i; 655 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 656 atomic_set(&lun->lun_acl_count, 0); 657 init_completion(&lun->lun_shutdown_comp); 658 INIT_LIST_HEAD(&lun->lun_acl_list); 659 INIT_LIST_HEAD(&lun->lun_cmd_list); 660 spin_lock_init(&lun->lun_acl_lock); 661 spin_lock_init(&lun->lun_cmd_lock); 662 spin_lock_init(&lun->lun_sep_lock); 663 } 664 665 se_tpg->se_tpg_type = se_tpg_type; 666 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr; 667 se_tpg->se_tpg_tfo = tfo; 668 se_tpg->se_tpg_wwn = se_wwn; 669 atomic_set(&se_tpg->tpg_pr_ref_count, 0); 670 INIT_LIST_HEAD(&se_tpg->acl_node_list); 671 INIT_LIST_HEAD(&se_tpg->se_tpg_node); 672 INIT_LIST_HEAD(&se_tpg->tpg_sess_list); 673 spin_lock_init(&se_tpg->acl_node_lock); 674 spin_lock_init(&se_tpg->session_lock); 675 spin_lock_init(&se_tpg->tpg_lun_lock); 676 677 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { 678 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { 679 kfree(se_tpg); 680 return -ENOMEM; 681 } 682 } 683 684 spin_lock_bh(&tpg_lock); 685 list_add_tail(&se_tpg->se_tpg_node, &tpg_list); 686 spin_unlock_bh(&tpg_lock); 687 688 pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 689 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), 690 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 691 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? 692 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); 693 694 return 0; 695 } 696 EXPORT_SYMBOL(core_tpg_register); 697 698 int core_tpg_deregister(struct se_portal_group *se_tpg) 699 { 700 struct se_node_acl *nacl, *nacl_tmp; 701 702 pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 703 " for endpoint: %s Portal Tag %u\n", 704 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 705 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), 706 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 707 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 708 709 spin_lock_bh(&tpg_lock); 710 list_del(&se_tpg->se_tpg_node); 711 spin_unlock_bh(&tpg_lock); 712 713 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 714 cpu_relax(); 715 /* 716 * Release any remaining demo-mode generated se_node_acl that have 717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 718 * in transport_deregister_session(). 719 */ 720 spin_lock_bh(&se_tpg->acl_node_lock); 721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 722 acl_list) { 723 list_del(&nacl->acl_list); 724 se_tpg->num_node_acls--; 725 spin_unlock_bh(&se_tpg->acl_node_lock); 726 727 core_tpg_wait_for_nacl_pr_ref(nacl); 728 core_free_device_list_for_node(nacl, se_tpg); 729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 730 731 spin_lock_bh(&se_tpg->acl_node_lock); 732 } 733 spin_unlock_bh(&se_tpg->acl_node_lock); 734 735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 736 core_tpg_release_virtual_lun0(se_tpg); 737 738 se_tpg->se_tpg_fabric_ptr = NULL; 739 kfree(se_tpg->tpg_lun_list); 740 return 0; 741 } 742 EXPORT_SYMBOL(core_tpg_deregister); 743 744 struct se_lun *core_tpg_pre_addlun( 745 struct se_portal_group *tpg, 746 u32 unpacked_lun) 747 { 748 struct se_lun *lun; 749 750 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 751 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 752 "-1: %u for Target Portal Group: %u\n", 753 tpg->se_tpg_tfo->get_fabric_name(), 754 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, 755 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 756 return ERR_PTR(-EOVERFLOW); 757 } 758 759 spin_lock(&tpg->tpg_lun_lock); 760 lun = &tpg->tpg_lun_list[unpacked_lun]; 761 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { 762 pr_err("TPG Logical Unit Number: %u is already active" 763 " on %s Target Portal Group: %u, ignoring request.\n", 764 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), 765 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 766 spin_unlock(&tpg->tpg_lun_lock); 767 return ERR_PTR(-EINVAL); 768 } 769 spin_unlock(&tpg->tpg_lun_lock); 770 771 return lun; 772 } 773 774 int core_tpg_post_addlun( 775 struct se_portal_group *tpg, 776 struct se_lun *lun, 777 u32 lun_access, 778 void *lun_ptr) 779 { 780 int ret; 781 782 ret = core_dev_export(lun_ptr, tpg, lun); 783 if (ret < 0) 784 return ret; 785 786 spin_lock(&tpg->tpg_lun_lock); 787 lun->lun_access = lun_access; 788 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; 789 spin_unlock(&tpg->tpg_lun_lock); 790 791 return 0; 792 } 793 794 static void core_tpg_shutdown_lun( 795 struct se_portal_group *tpg, 796 struct se_lun *lun) 797 { 798 core_clear_lun_from_tpg(lun, tpg); 799 transport_clear_lun_from_sessions(lun); 800 } 801 802 struct se_lun *core_tpg_pre_dellun( 803 struct se_portal_group *tpg, 804 u32 unpacked_lun, 805 int *ret) 806 { 807 struct se_lun *lun; 808 809 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 810 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 811 "-1: %u for Target Portal Group: %u\n", 812 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 813 TRANSPORT_MAX_LUNS_PER_TPG-1, 814 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 815 return ERR_PTR(-EOVERFLOW); 816 } 817 818 spin_lock(&tpg->tpg_lun_lock); 819 lun = &tpg->tpg_lun_list[unpacked_lun]; 820 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 821 pr_err("%s Logical Unit Number: %u is not active on" 822 " Target Portal Group: %u, ignoring request.\n", 823 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 824 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 825 spin_unlock(&tpg->tpg_lun_lock); 826 return ERR_PTR(-ENODEV); 827 } 828 spin_unlock(&tpg->tpg_lun_lock); 829 830 return lun; 831 } 832 833 int core_tpg_post_dellun( 834 struct se_portal_group *tpg, 835 struct se_lun *lun) 836 { 837 core_tpg_shutdown_lun(tpg, lun); 838 839 core_dev_unexport(lun->lun_se_dev, tpg, lun); 840 841 spin_lock(&tpg->tpg_lun_lock); 842 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 843 spin_unlock(&tpg->tpg_lun_lock); 844 845 return 0; 846 } 847