1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * SBP2 target driver (SCSI over IEEE1394 in target mode) 4 * 5 * Copyright (C) 2011 Chris Boot <bootc@bootc.net> 6 */ 7 8 #define pr_fmt(fmt) "sbp_target: " fmt 9 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/init.h> 13 #include <linux/types.h> 14 #include <linux/string.h> 15 #include <linux/configfs.h> 16 #include <linux/ctype.h> 17 #include <linux/delay.h> 18 #include <linux/firewire.h> 19 #include <linux/firewire-constants.h> 20 #include <scsi/scsi_proto.h> 21 #include <scsi/scsi_tcq.h> 22 #include <target/target_core_base.h> 23 #include <target/target_core_backend.h> 24 #include <target/target_core_fabric.h> 25 #include <linux/unaligned.h> 26 27 #include "sbp_target.h" 28 29 /* FireWire address region for management and command block address handlers */ 30 static const struct fw_address_region sbp_register_region = { 31 .start = CSR_REGISTER_BASE + 0x10000, 32 .end = 0x1000000000000ULL, 33 }; 34 35 static const u32 sbp_unit_directory_template[] = { 36 0x1200609e, /* unit_specifier_id: NCITS/T10 */ 37 0x13010483, /* unit_sw_version: 1155D Rev 4 */ 38 0x3800609e, /* command_set_specifier_id: NCITS/T10 */ 39 0x390104d8, /* command_set: SPC-2 */ 40 0x3b000000, /* command_set_revision: 0 */ 41 0x3c000001, /* firmware_revision: 1 */ 42 }; 43 44 #define SESSION_MAINTENANCE_INTERVAL HZ 45 46 static atomic_t login_id = ATOMIC_INIT(0); 47 48 static void session_maintenance_work(struct work_struct *); 49 static int sbp_run_transaction(struct fw_card *, int, int, int, int, 50 unsigned long long, void *, size_t); 51 52 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) 53 { 54 int ret; 55 __be32 high, low; 56 57 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 58 req->node_addr, req->generation, req->speed, 59 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4, 60 &high, sizeof(high)); 61 if (ret != RCODE_COMPLETE) 62 return ret; 63 64 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 65 req->node_addr, req->generation, req->speed, 66 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4, 67 &low, sizeof(low)); 68 if (ret != RCODE_COMPLETE) 69 return ret; 70 71 *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low); 72 73 return RCODE_COMPLETE; 74 } 75 76 static struct sbp_session *sbp_session_find_by_guid( 77 struct sbp_tpg *tpg, u64 guid) 78 { 79 struct se_session *se_sess; 80 struct sbp_session *sess, *found = NULL; 81 82 spin_lock_bh(&tpg->se_tpg.session_lock); 83 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 84 sess = se_sess->fabric_sess_ptr; 85 if (sess->guid == guid) 86 found = sess; 87 } 88 spin_unlock_bh(&tpg->se_tpg.session_lock); 89 90 return found; 91 } 92 93 static struct sbp_login_descriptor *sbp_login_find_by_lun( 94 struct sbp_session *session, u32 unpacked_lun) 95 { 96 struct sbp_login_descriptor *login, *found = NULL; 97 98 spin_lock_bh(&session->lock); 99 list_for_each_entry(login, &session->login_list, link) { 100 if (login->login_lun == unpacked_lun) 101 found = login; 102 } 103 spin_unlock_bh(&session->lock); 104 105 return found; 106 } 107 108 static int sbp_login_count_all_by_lun( 109 struct sbp_tpg *tpg, 110 u32 unpacked_lun, 111 int exclusive) 112 { 113 struct se_session *se_sess; 114 struct sbp_session *sess; 115 struct sbp_login_descriptor *login; 116 int count = 0; 117 118 spin_lock_bh(&tpg->se_tpg.session_lock); 119 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 120 sess = se_sess->fabric_sess_ptr; 121 122 spin_lock_bh(&sess->lock); 123 list_for_each_entry(login, &sess->login_list, link) { 124 if (login->login_lun != unpacked_lun) 125 continue; 126 127 if (!exclusive || login->exclusive) 128 count++; 129 } 130 spin_unlock_bh(&sess->lock); 131 } 132 spin_unlock_bh(&tpg->se_tpg.session_lock); 133 134 return count; 135 } 136 137 static struct sbp_login_descriptor *sbp_login_find_by_id( 138 struct sbp_tpg *tpg, int login_id) 139 { 140 struct se_session *se_sess; 141 struct sbp_session *sess; 142 struct sbp_login_descriptor *login, *found = NULL; 143 144 spin_lock_bh(&tpg->se_tpg.session_lock); 145 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 146 sess = se_sess->fabric_sess_ptr; 147 148 spin_lock_bh(&sess->lock); 149 list_for_each_entry(login, &sess->login_list, link) { 150 if (login->login_id == login_id) 151 found = login; 152 } 153 spin_unlock_bh(&sess->lock); 154 } 155 spin_unlock_bh(&tpg->se_tpg.session_lock); 156 157 return found; 158 } 159 160 static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err) 161 { 162 struct se_portal_group *se_tpg = &tpg->se_tpg; 163 struct se_lun *se_lun; 164 165 rcu_read_lock(); 166 hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) { 167 if (se_lun->unpacked_lun == login_lun) { 168 rcu_read_unlock(); 169 *err = 0; 170 return login_lun; 171 } 172 } 173 rcu_read_unlock(); 174 175 *err = -ENODEV; 176 return login_lun; 177 } 178 179 static struct sbp_session *sbp_session_create( 180 struct sbp_tpg *tpg, 181 u64 guid) 182 { 183 struct sbp_session *sess; 184 int ret; 185 char guid_str[17]; 186 187 snprintf(guid_str, sizeof(guid_str), "%016llx", guid); 188 189 sess = kmalloc(sizeof(*sess), GFP_KERNEL); 190 if (!sess) 191 return ERR_PTR(-ENOMEM); 192 193 spin_lock_init(&sess->lock); 194 INIT_LIST_HEAD(&sess->login_list); 195 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); 196 sess->guid = guid; 197 198 sess->se_sess = target_setup_session(&tpg->se_tpg, 128, 199 sizeof(struct sbp_target_request), 200 TARGET_PROT_NORMAL, guid_str, 201 sess, NULL); 202 if (IS_ERR(sess->se_sess)) { 203 pr_err("failed to init se_session\n"); 204 ret = PTR_ERR(sess->se_sess); 205 kfree(sess); 206 return ERR_PTR(ret); 207 } 208 209 return sess; 210 } 211 212 static void sbp_session_release(struct sbp_session *sess, bool cancel_work) 213 { 214 spin_lock_bh(&sess->lock); 215 if (!list_empty(&sess->login_list)) { 216 spin_unlock_bh(&sess->lock); 217 return; 218 } 219 spin_unlock_bh(&sess->lock); 220 221 if (cancel_work) 222 cancel_delayed_work_sync(&sess->maint_work); 223 224 target_remove_session(sess->se_sess); 225 226 if (sess->card) 227 fw_card_put(sess->card); 228 229 kfree(sess); 230 } 231 232 static void sbp_target_agent_unregister(struct sbp_target_agent *); 233 234 static void sbp_login_release(struct sbp_login_descriptor *login, 235 bool cancel_work) 236 { 237 struct sbp_session *sess = login->sess; 238 239 /* FIXME: abort/wait on tasks */ 240 241 sbp_target_agent_unregister(login->tgt_agt); 242 243 if (sess) { 244 spin_lock_bh(&sess->lock); 245 list_del(&login->link); 246 spin_unlock_bh(&sess->lock); 247 248 sbp_session_release(sess, cancel_work); 249 } 250 251 kfree(login); 252 } 253 254 static struct sbp_target_agent *sbp_target_agent_register( 255 struct sbp_login_descriptor *); 256 257 static void sbp_management_request_login( 258 struct sbp_management_agent *agent, struct sbp_management_request *req, 259 int *status_data_size) 260 { 261 struct sbp_tport *tport = agent->tport; 262 struct sbp_tpg *tpg = tport->tpg; 263 struct sbp_session *sess; 264 struct sbp_login_descriptor *login; 265 struct sbp_login_response_block *response; 266 u64 guid; 267 u32 unpacked_lun; 268 int login_response_len, ret; 269 270 unpacked_lun = sbp_get_lun_from_tpg(tpg, 271 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret); 272 if (ret) { 273 pr_notice("login to unknown LUN: %d\n", 274 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); 275 276 req->status.status = cpu_to_be32( 277 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 278 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP)); 279 return; 280 } 281 282 ret = read_peer_guid(&guid, req); 283 if (ret != RCODE_COMPLETE) { 284 pr_warn("failed to read peer GUID: %d\n", ret); 285 286 req->status.status = cpu_to_be32( 287 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 288 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 289 return; 290 } 291 292 pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", 293 unpacked_lun, guid); 294 295 sess = sbp_session_find_by_guid(tpg, guid); 296 if (sess) { 297 login = sbp_login_find_by_lun(sess, unpacked_lun); 298 if (login) { 299 pr_notice("initiator already logged-in\n"); 300 301 /* 302 * SBP-2 R4 says we should return access denied, but 303 * that can confuse initiators. Instead we need to 304 * treat this like a reconnect, but send the login 305 * response block like a fresh login. 306 * 307 * This is required particularly in the case of Apple 308 * devices booting off the FireWire target, where 309 * the firmware has an active login to the target. When 310 * the OS takes control of the session it issues its own 311 * LOGIN rather than a RECONNECT. To avoid the machine 312 * waiting until the reconnect_hold expires, we can skip 313 * the ACCESS_DENIED errors to speed things up. 314 */ 315 316 goto already_logged_in; 317 } 318 } 319 320 /* 321 * check exclusive bit in login request 322 * reject with access_denied if any logins present 323 */ 324 if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && 325 sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) { 326 pr_warn("refusing exclusive login with other active logins\n"); 327 328 req->status.status = cpu_to_be32( 329 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 330 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 331 return; 332 } 333 334 /* 335 * check exclusive bit in any existing login descriptor 336 * reject with access_denied if any exclusive logins present 337 */ 338 if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) { 339 pr_warn("refusing login while another exclusive login present\n"); 340 341 req->status.status = cpu_to_be32( 342 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 343 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 344 return; 345 } 346 347 /* 348 * check we haven't exceeded the number of allowed logins 349 * reject with resources_unavailable if we have 350 */ 351 if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >= 352 tport->max_logins_per_lun) { 353 pr_warn("max number of logins reached\n"); 354 355 req->status.status = cpu_to_be32( 356 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 357 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 358 return; 359 } 360 361 if (!sess) { 362 sess = sbp_session_create(tpg, guid); 363 if (IS_ERR(sess)) { 364 switch (PTR_ERR(sess)) { 365 case -EPERM: 366 ret = SBP_STATUS_ACCESS_DENIED; 367 break; 368 default: 369 ret = SBP_STATUS_RESOURCES_UNAVAIL; 370 break; 371 } 372 373 req->status.status = cpu_to_be32( 374 STATUS_BLOCK_RESP( 375 STATUS_RESP_REQUEST_COMPLETE) | 376 STATUS_BLOCK_SBP_STATUS(ret)); 377 return; 378 } 379 380 sess->node_id = req->node_addr; 381 sess->card = fw_card_get(req->card); 382 sess->generation = req->generation; 383 sess->speed = req->speed; 384 385 schedule_delayed_work(&sess->maint_work, 386 SESSION_MAINTENANCE_INTERVAL); 387 } 388 389 /* only take the latest reconnect_hold into account */ 390 sess->reconnect_hold = min( 391 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), 392 tport->max_reconnect_timeout) - 1; 393 394 login = kmalloc(sizeof(*login), GFP_KERNEL); 395 if (!login) { 396 pr_err("failed to allocate login descriptor\n"); 397 398 sbp_session_release(sess, true); 399 400 req->status.status = cpu_to_be32( 401 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 402 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 403 return; 404 } 405 406 login->sess = sess; 407 login->login_lun = unpacked_lun; 408 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); 409 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); 410 login->login_id = atomic_inc_return(&login_id); 411 412 login->tgt_agt = sbp_target_agent_register(login); 413 if (IS_ERR(login->tgt_agt)) { 414 ret = PTR_ERR(login->tgt_agt); 415 pr_err("failed to map command block handler: %d\n", ret); 416 417 sbp_session_release(sess, true); 418 kfree(login); 419 420 req->status.status = cpu_to_be32( 421 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 422 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 423 return; 424 } 425 426 spin_lock_bh(&sess->lock); 427 list_add_tail(&login->link, &sess->login_list); 428 spin_unlock_bh(&sess->lock); 429 430 already_logged_in: 431 response = kzalloc(sizeof(*response), GFP_KERNEL); 432 if (!response) { 433 pr_err("failed to allocate login response block\n"); 434 435 sbp_login_release(login, true); 436 437 req->status.status = cpu_to_be32( 438 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 439 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 440 return; 441 } 442 443 login_response_len = clamp_val( 444 LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), 445 12, sizeof(*response)); 446 response->misc = cpu_to_be32( 447 ((login_response_len & 0xffff) << 16) | 448 (login->login_id & 0xffff)); 449 response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff); 450 addr_to_sbp2_pointer(login->tgt_agt->handler.offset, 451 &response->command_block_agent); 452 453 ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST, 454 sess->node_id, sess->generation, sess->speed, 455 sbp2_pointer_to_addr(&req->orb.ptr2), response, 456 login_response_len); 457 if (ret != RCODE_COMPLETE) { 458 pr_debug("failed to write login response block: %x\n", ret); 459 460 kfree(response); 461 sbp_login_release(login, true); 462 463 req->status.status = cpu_to_be32( 464 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 465 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 466 return; 467 } 468 469 kfree(response); 470 471 req->status.status = cpu_to_be32( 472 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 473 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 474 } 475 476 static void sbp_management_request_query_logins( 477 struct sbp_management_agent *agent, struct sbp_management_request *req, 478 int *status_data_size) 479 { 480 pr_notice("QUERY LOGINS not implemented\n"); 481 /* FIXME: implement */ 482 483 req->status.status = cpu_to_be32( 484 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 485 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 486 } 487 488 static void sbp_management_request_reconnect( 489 struct sbp_management_agent *agent, struct sbp_management_request *req, 490 int *status_data_size) 491 { 492 struct sbp_tport *tport = agent->tport; 493 struct sbp_tpg *tpg = tport->tpg; 494 int ret; 495 u64 guid; 496 struct sbp_login_descriptor *login; 497 498 ret = read_peer_guid(&guid, req); 499 if (ret != RCODE_COMPLETE) { 500 pr_warn("failed to read peer GUID: %d\n", ret); 501 502 req->status.status = cpu_to_be32( 503 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 504 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 505 return; 506 } 507 508 pr_notice("mgt_agent RECONNECT from %016llx\n", guid); 509 510 login = sbp_login_find_by_id(tpg, 511 RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); 512 513 if (!login) { 514 pr_err("mgt_agent RECONNECT unknown login ID\n"); 515 516 req->status.status = cpu_to_be32( 517 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 518 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 519 return; 520 } 521 522 if (login->sess->guid != guid) { 523 pr_err("mgt_agent RECONNECT login GUID doesn't match\n"); 524 525 req->status.status = cpu_to_be32( 526 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 527 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 528 return; 529 } 530 531 spin_lock_bh(&login->sess->lock); 532 if (login->sess->card) 533 fw_card_put(login->sess->card); 534 535 /* update the node details */ 536 login->sess->generation = req->generation; 537 login->sess->node_id = req->node_addr; 538 login->sess->card = fw_card_get(req->card); 539 login->sess->speed = req->speed; 540 spin_unlock_bh(&login->sess->lock); 541 542 req->status.status = cpu_to_be32( 543 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 544 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 545 } 546 547 static void sbp_management_request_logout( 548 struct sbp_management_agent *agent, struct sbp_management_request *req, 549 int *status_data_size) 550 { 551 struct sbp_tport *tport = agent->tport; 552 struct sbp_tpg *tpg = tport->tpg; 553 int id; 554 struct sbp_login_descriptor *login; 555 556 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 557 558 login = sbp_login_find_by_id(tpg, id); 559 if (!login) { 560 pr_warn("cannot find login: %d\n", id); 561 562 req->status.status = cpu_to_be32( 563 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 564 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN)); 565 return; 566 } 567 568 pr_info("mgt_agent LOGOUT from LUN %d session %d\n", 569 login->login_lun, login->login_id); 570 571 if (req->node_addr != login->sess->node_id) { 572 pr_warn("logout from different node ID\n"); 573 574 req->status.status = cpu_to_be32( 575 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 576 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 577 return; 578 } 579 580 sbp_login_release(login, true); 581 582 req->status.status = cpu_to_be32( 583 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 584 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 585 } 586 587 static void session_check_for_reset(struct sbp_session *sess) 588 { 589 bool card_valid = false; 590 591 spin_lock_bh(&sess->lock); 592 593 if (sess->card) { 594 spin_lock_irq(&sess->card->lock); 595 card_valid = (sess->card->local_node != NULL); 596 spin_unlock_irq(&sess->card->lock); 597 598 if (!card_valid) { 599 fw_card_put(sess->card); 600 sess->card = NULL; 601 } 602 } 603 604 if (!card_valid || (sess->generation != sess->card->generation)) { 605 pr_info("Waiting for reconnect from node: %016llx\n", 606 sess->guid); 607 608 sess->node_id = -1; 609 sess->reconnect_expires = get_jiffies_64() + 610 ((sess->reconnect_hold + 1) * HZ); 611 } 612 613 spin_unlock_bh(&sess->lock); 614 } 615 616 static void session_reconnect_expired(struct sbp_session *sess) 617 { 618 struct sbp_login_descriptor *login, *temp; 619 LIST_HEAD(login_list); 620 621 pr_info("Reconnect timer expired for node: %016llx\n", sess->guid); 622 623 spin_lock_bh(&sess->lock); 624 list_for_each_entry_safe(login, temp, &sess->login_list, link) { 625 login->sess = NULL; 626 list_move_tail(&login->link, &login_list); 627 } 628 spin_unlock_bh(&sess->lock); 629 630 list_for_each_entry_safe(login, temp, &login_list, link) { 631 list_del(&login->link); 632 sbp_login_release(login, false); 633 } 634 635 sbp_session_release(sess, false); 636 } 637 638 static void session_maintenance_work(struct work_struct *work) 639 { 640 struct sbp_session *sess = container_of(work, struct sbp_session, 641 maint_work.work); 642 643 /* could be called while tearing down the session */ 644 spin_lock_bh(&sess->lock); 645 if (list_empty(&sess->login_list)) { 646 spin_unlock_bh(&sess->lock); 647 return; 648 } 649 spin_unlock_bh(&sess->lock); 650 651 if (sess->node_id != -1) { 652 /* check for bus reset and make node_id invalid */ 653 session_check_for_reset(sess); 654 655 schedule_delayed_work(&sess->maint_work, 656 SESSION_MAINTENANCE_INTERVAL); 657 } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) { 658 /* still waiting for reconnect */ 659 schedule_delayed_work(&sess->maint_work, 660 SESSION_MAINTENANCE_INTERVAL); 661 } else { 662 /* reconnect timeout has expired */ 663 session_reconnect_expired(sess); 664 } 665 } 666 667 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data, 668 struct sbp_target_agent *agent) 669 { 670 int state; 671 672 switch (tcode) { 673 case TCODE_READ_QUADLET_REQUEST: 674 pr_debug("tgt_agent AGENT_STATE READ\n"); 675 676 spin_lock_bh(&agent->lock); 677 state = agent->state; 678 spin_unlock_bh(&agent->lock); 679 680 *(__be32 *)data = cpu_to_be32(state); 681 682 return RCODE_COMPLETE; 683 684 case TCODE_WRITE_QUADLET_REQUEST: 685 /* ignored */ 686 return RCODE_COMPLETE; 687 688 default: 689 return RCODE_TYPE_ERROR; 690 } 691 } 692 693 static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data, 694 struct sbp_target_agent *agent) 695 { 696 switch (tcode) { 697 case TCODE_WRITE_QUADLET_REQUEST: 698 pr_debug("tgt_agent AGENT_RESET\n"); 699 spin_lock_bh(&agent->lock); 700 agent->state = AGENT_STATE_RESET; 701 spin_unlock_bh(&agent->lock); 702 return RCODE_COMPLETE; 703 704 default: 705 return RCODE_TYPE_ERROR; 706 } 707 } 708 709 static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data, 710 struct sbp_target_agent *agent) 711 { 712 struct sbp2_pointer *ptr = data; 713 714 switch (tcode) { 715 case TCODE_WRITE_BLOCK_REQUEST: 716 spin_lock_bh(&agent->lock); 717 if (agent->state != AGENT_STATE_SUSPENDED && 718 agent->state != AGENT_STATE_RESET) { 719 spin_unlock_bh(&agent->lock); 720 pr_notice("Ignoring ORB_POINTER write while active.\n"); 721 return RCODE_CONFLICT_ERROR; 722 } 723 agent->state = AGENT_STATE_ACTIVE; 724 spin_unlock_bh(&agent->lock); 725 726 agent->orb_pointer = sbp2_pointer_to_addr(ptr); 727 agent->doorbell = false; 728 729 pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", 730 agent->orb_pointer); 731 732 queue_work(system_dfl_wq, &agent->work); 733 734 return RCODE_COMPLETE; 735 736 case TCODE_READ_BLOCK_REQUEST: 737 pr_debug("tgt_agent ORB_POINTER READ\n"); 738 spin_lock_bh(&agent->lock); 739 addr_to_sbp2_pointer(agent->orb_pointer, ptr); 740 spin_unlock_bh(&agent->lock); 741 return RCODE_COMPLETE; 742 743 default: 744 return RCODE_TYPE_ERROR; 745 } 746 } 747 748 static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data, 749 struct sbp_target_agent *agent) 750 { 751 switch (tcode) { 752 case TCODE_WRITE_QUADLET_REQUEST: 753 spin_lock_bh(&agent->lock); 754 if (agent->state != AGENT_STATE_SUSPENDED) { 755 spin_unlock_bh(&agent->lock); 756 pr_debug("Ignoring DOORBELL while active.\n"); 757 return RCODE_CONFLICT_ERROR; 758 } 759 agent->state = AGENT_STATE_ACTIVE; 760 spin_unlock_bh(&agent->lock); 761 762 agent->doorbell = true; 763 764 pr_debug("tgt_agent DOORBELL\n"); 765 766 queue_work(system_dfl_wq, &agent->work); 767 768 return RCODE_COMPLETE; 769 770 case TCODE_READ_QUADLET_REQUEST: 771 return RCODE_COMPLETE; 772 773 default: 774 return RCODE_TYPE_ERROR; 775 } 776 } 777 778 static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card, 779 int tcode, void *data, struct sbp_target_agent *agent) 780 { 781 switch (tcode) { 782 case TCODE_WRITE_QUADLET_REQUEST: 783 pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n"); 784 /* ignored as we don't send unsolicited status */ 785 return RCODE_COMPLETE; 786 787 case TCODE_READ_QUADLET_REQUEST: 788 return RCODE_COMPLETE; 789 790 default: 791 return RCODE_TYPE_ERROR; 792 } 793 } 794 795 static void tgt_agent_rw(struct fw_card *card, struct fw_request *request, 796 int tcode, int destination, int source, int generation, 797 unsigned long long offset, void *data, size_t length, 798 void *callback_data) 799 { 800 struct sbp_target_agent *agent = callback_data; 801 struct sbp_session *sess = agent->login->sess; 802 int sess_gen, sess_node, rcode; 803 804 spin_lock_bh(&sess->lock); 805 sess_gen = sess->generation; 806 sess_node = sess->node_id; 807 spin_unlock_bh(&sess->lock); 808 809 if (generation != sess_gen) { 810 pr_notice("ignoring request with wrong generation\n"); 811 rcode = RCODE_TYPE_ERROR; 812 goto out; 813 } 814 815 if (source != sess_node) { 816 pr_notice("ignoring request from foreign node (%x != %x)\n", 817 source, sess_node); 818 rcode = RCODE_TYPE_ERROR; 819 goto out; 820 } 821 822 /* turn offset into the offset from the start of the block */ 823 offset -= agent->handler.offset; 824 825 if (offset == 0x00 && length == 4) { 826 /* AGENT_STATE */ 827 rcode = tgt_agent_rw_agent_state(card, tcode, data, agent); 828 } else if (offset == 0x04 && length == 4) { 829 /* AGENT_RESET */ 830 rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent); 831 } else if (offset == 0x08 && length == 8) { 832 /* ORB_POINTER */ 833 rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent); 834 } else if (offset == 0x10 && length == 4) { 835 /* DOORBELL */ 836 rcode = tgt_agent_rw_doorbell(card, tcode, data, agent); 837 } else if (offset == 0x14 && length == 4) { 838 /* UNSOLICITED_STATUS_ENABLE */ 839 rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode, 840 data, agent); 841 } else { 842 rcode = RCODE_ADDRESS_ERROR; 843 } 844 845 out: 846 fw_send_response(card, request, rcode); 847 } 848 849 static void sbp_handle_command(struct sbp_target_request *); 850 static int sbp_send_status(struct sbp_target_request *); 851 static void sbp_free_request(struct sbp_target_request *); 852 853 static void tgt_agent_process_work(struct work_struct *work) 854 { 855 struct sbp_target_request *req = 856 container_of(work, struct sbp_target_request, work); 857 858 pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n", 859 req->orb_pointer, 860 sbp2_pointer_to_addr(&req->orb.next_orb), 861 sbp2_pointer_to_addr(&req->orb.data_descriptor), 862 be32_to_cpu(req->orb.misc)); 863 864 if (req->orb_pointer >> 32) 865 pr_debug("ORB with high bits set\n"); 866 867 switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { 868 case 0:/* Format specified by this standard */ 869 sbp_handle_command(req); 870 return; 871 case 1: /* Reserved for future standardization */ 872 case 2: /* Vendor-dependent */ 873 req->status.status |= cpu_to_be32( 874 STATUS_BLOCK_RESP( 875 STATUS_RESP_REQUEST_COMPLETE) | 876 STATUS_BLOCK_DEAD(0) | 877 STATUS_BLOCK_LEN(1) | 878 STATUS_BLOCK_SBP_STATUS( 879 SBP_STATUS_REQ_TYPE_NOTSUPP)); 880 sbp_send_status(req); 881 return; 882 case 3: /* Dummy ORB */ 883 req->status.status |= cpu_to_be32( 884 STATUS_BLOCK_RESP( 885 STATUS_RESP_REQUEST_COMPLETE) | 886 STATUS_BLOCK_DEAD(0) | 887 STATUS_BLOCK_LEN(1) | 888 STATUS_BLOCK_SBP_STATUS( 889 SBP_STATUS_DUMMY_ORB_COMPLETE)); 890 sbp_send_status(req); 891 return; 892 default: 893 BUG(); 894 } 895 } 896 897 /* used to double-check we haven't been issued an AGENT_RESET */ 898 static inline bool tgt_agent_check_active(struct sbp_target_agent *agent) 899 { 900 bool active; 901 902 spin_lock_bh(&agent->lock); 903 active = (agent->state == AGENT_STATE_ACTIVE); 904 spin_unlock_bh(&agent->lock); 905 906 return active; 907 } 908 909 static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, 910 struct fw_card *card, u64 next_orb) 911 { 912 struct se_session *se_sess = sess->se_sess; 913 struct sbp_target_request *req; 914 int tag, cpu; 915 916 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 917 if (tag < 0) 918 return ERR_PTR(-ENOMEM); 919 920 req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag]; 921 memset(req, 0, sizeof(*req)); 922 req->se_cmd.map_tag = tag; 923 req->se_cmd.map_cpu = cpu; 924 req->se_cmd.tag = next_orb; 925 926 return req; 927 } 928 929 static void tgt_agent_fetch_work(struct work_struct *work) 930 { 931 struct sbp_target_agent *agent = 932 container_of(work, struct sbp_target_agent, work); 933 struct sbp_session *sess = agent->login->sess; 934 struct sbp_target_request *req; 935 int ret; 936 bool doorbell = agent->doorbell; 937 u64 next_orb = agent->orb_pointer; 938 939 while (next_orb && tgt_agent_check_active(agent)) { 940 req = sbp_mgt_get_req(sess, sess->card, next_orb); 941 if (IS_ERR(req)) { 942 spin_lock_bh(&agent->lock); 943 agent->state = AGENT_STATE_DEAD; 944 spin_unlock_bh(&agent->lock); 945 return; 946 } 947 948 req->login = agent->login; 949 req->orb_pointer = next_orb; 950 951 req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( 952 req->orb_pointer >> 32)); 953 req->status.orb_low = cpu_to_be32( 954 req->orb_pointer & 0xfffffffc); 955 956 /* read in the ORB */ 957 ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST, 958 sess->node_id, sess->generation, sess->speed, 959 req->orb_pointer, &req->orb, sizeof(req->orb)); 960 if (ret != RCODE_COMPLETE) { 961 pr_debug("tgt_orb fetch failed: %x\n", ret); 962 req->status.status |= cpu_to_be32( 963 STATUS_BLOCK_SRC( 964 STATUS_SRC_ORB_FINISHED) | 965 STATUS_BLOCK_RESP( 966 STATUS_RESP_TRANSPORT_FAILURE) | 967 STATUS_BLOCK_DEAD(1) | 968 STATUS_BLOCK_LEN(1) | 969 STATUS_BLOCK_SBP_STATUS( 970 SBP_STATUS_UNSPECIFIED_ERROR)); 971 spin_lock_bh(&agent->lock); 972 agent->state = AGENT_STATE_DEAD; 973 spin_unlock_bh(&agent->lock); 974 975 sbp_send_status(req); 976 return; 977 } 978 979 /* check the next_ORB field */ 980 if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { 981 next_orb = 0; 982 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 983 STATUS_SRC_ORB_FINISHED)); 984 } else { 985 next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); 986 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 987 STATUS_SRC_ORB_CONTINUING)); 988 } 989 990 if (tgt_agent_check_active(agent) && !doorbell) { 991 INIT_WORK(&req->work, tgt_agent_process_work); 992 queue_work(system_dfl_wq, &req->work); 993 } else { 994 /* don't process this request, just check next_ORB */ 995 sbp_free_request(req); 996 } 997 998 spin_lock_bh(&agent->lock); 999 doorbell = agent->doorbell = false; 1000 1001 /* check if we should carry on processing */ 1002 if (next_orb) 1003 agent->orb_pointer = next_orb; 1004 else 1005 agent->state = AGENT_STATE_SUSPENDED; 1006 1007 spin_unlock_bh(&agent->lock); 1008 } 1009 } 1010 1011 static struct sbp_target_agent *sbp_target_agent_register( 1012 struct sbp_login_descriptor *login) 1013 { 1014 struct sbp_target_agent *agent; 1015 int ret; 1016 1017 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1018 if (!agent) 1019 return ERR_PTR(-ENOMEM); 1020 1021 spin_lock_init(&agent->lock); 1022 1023 agent->handler.length = 0x20; 1024 agent->handler.address_callback = tgt_agent_rw; 1025 agent->handler.callback_data = agent; 1026 1027 agent->login = login; 1028 agent->state = AGENT_STATE_RESET; 1029 INIT_WORK(&agent->work, tgt_agent_fetch_work); 1030 agent->orb_pointer = 0; 1031 agent->doorbell = false; 1032 1033 ret = fw_core_add_address_handler(&agent->handler, 1034 &sbp_register_region); 1035 if (ret < 0) { 1036 kfree(agent); 1037 return ERR_PTR(ret); 1038 } 1039 1040 return agent; 1041 } 1042 1043 static void sbp_target_agent_unregister(struct sbp_target_agent *agent) 1044 { 1045 fw_core_remove_address_handler(&agent->handler); 1046 cancel_work_sync(&agent->work); 1047 kfree(agent); 1048 } 1049 1050 /* 1051 * Simple wrapper around fw_run_transaction that retries the transaction several 1052 * times in case of failure, with an exponential backoff. 1053 */ 1054 static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id, 1055 int generation, int speed, unsigned long long offset, 1056 void *payload, size_t length) 1057 { 1058 int attempt, ret, delay; 1059 1060 for (attempt = 1; attempt <= 5; attempt++) { 1061 ret = fw_run_transaction(card, tcode, destination_id, 1062 generation, speed, offset, payload, length); 1063 1064 switch (ret) { 1065 case RCODE_COMPLETE: 1066 case RCODE_TYPE_ERROR: 1067 case RCODE_ADDRESS_ERROR: 1068 case RCODE_GENERATION: 1069 return ret; 1070 1071 default: 1072 delay = 5 * attempt * attempt; 1073 usleep_range(delay, delay * 2); 1074 } 1075 } 1076 1077 return ret; 1078 } 1079 1080 /* 1081 * Wrapper around sbp_run_transaction that gets the card, destination, 1082 * generation and speed out of the request's session. 1083 */ 1084 static int sbp_run_request_transaction(struct sbp_target_request *req, 1085 int tcode, unsigned long long offset, void *payload, 1086 size_t length) 1087 { 1088 struct sbp_login_descriptor *login = req->login; 1089 struct sbp_session *sess = login->sess; 1090 struct fw_card *card; 1091 int node_id, generation, speed, ret; 1092 1093 spin_lock_bh(&sess->lock); 1094 card = fw_card_get(sess->card); 1095 node_id = sess->node_id; 1096 generation = sess->generation; 1097 speed = sess->speed; 1098 spin_unlock_bh(&sess->lock); 1099 1100 ret = sbp_run_transaction(card, tcode, node_id, generation, speed, 1101 offset, payload, length); 1102 1103 fw_card_put(card); 1104 1105 return ret; 1106 } 1107 1108 static int sbp_fetch_command(struct sbp_target_request *req) 1109 { 1110 int ret, cmd_len, copy_len; 1111 1112 cmd_len = scsi_command_size(req->orb.command_block); 1113 1114 req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL); 1115 if (!req->cmd_buf) 1116 return -ENOMEM; 1117 1118 memcpy(req->cmd_buf, req->orb.command_block, 1119 min_t(int, cmd_len, sizeof(req->orb.command_block))); 1120 1121 if (cmd_len > sizeof(req->orb.command_block)) { 1122 pr_debug("sbp_fetch_command: filling in long command\n"); 1123 copy_len = cmd_len - sizeof(req->orb.command_block); 1124 1125 ret = sbp_run_request_transaction(req, 1126 TCODE_READ_BLOCK_REQUEST, 1127 req->orb_pointer + sizeof(req->orb), 1128 req->cmd_buf + sizeof(req->orb.command_block), 1129 copy_len); 1130 if (ret != RCODE_COMPLETE) 1131 return -EIO; 1132 } 1133 1134 return 0; 1135 } 1136 1137 static int sbp_fetch_page_table(struct sbp_target_request *req) 1138 { 1139 int pg_tbl_sz, ret; 1140 struct sbp_page_table_entry *pg_tbl; 1141 1142 if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc))) 1143 return 0; 1144 1145 pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) * 1146 sizeof(struct sbp_page_table_entry); 1147 1148 pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL); 1149 if (!pg_tbl) 1150 return -ENOMEM; 1151 1152 ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST, 1153 sbp2_pointer_to_addr(&req->orb.data_descriptor), 1154 pg_tbl, pg_tbl_sz); 1155 if (ret != RCODE_COMPLETE) { 1156 kfree(pg_tbl); 1157 return -EIO; 1158 } 1159 1160 req->pg_tbl = pg_tbl; 1161 return 0; 1162 } 1163 1164 static void sbp_calc_data_length_direction(struct sbp_target_request *req, 1165 u32 *data_len, enum dma_data_direction *data_dir) 1166 { 1167 int data_size, direction, idx; 1168 1169 data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1170 direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc)); 1171 1172 if (!data_size) { 1173 *data_len = 0; 1174 *data_dir = DMA_NONE; 1175 return; 1176 } 1177 1178 *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1179 1180 if (req->pg_tbl) { 1181 *data_len = 0; 1182 for (idx = 0; idx < data_size; idx++) { 1183 *data_len += be16_to_cpu( 1184 req->pg_tbl[idx].segment_length); 1185 } 1186 } else { 1187 *data_len = data_size; 1188 } 1189 } 1190 1191 static void sbp_handle_command(struct sbp_target_request *req) 1192 { 1193 struct sbp_login_descriptor *login = req->login; 1194 struct sbp_session *sess = login->sess; 1195 int ret, unpacked_lun; 1196 u32 data_length; 1197 enum dma_data_direction data_dir; 1198 1199 ret = sbp_fetch_command(req); 1200 if (ret) { 1201 pr_debug("sbp_handle_command: fetch command failed: %d\n", ret); 1202 goto err; 1203 } 1204 1205 ret = sbp_fetch_page_table(req); 1206 if (ret) { 1207 pr_debug("sbp_handle_command: fetch page table failed: %d\n", 1208 ret); 1209 goto err; 1210 } 1211 1212 unpacked_lun = req->login->login_lun; 1213 sbp_calc_data_length_direction(req, &data_length, &data_dir); 1214 1215 pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", 1216 req->orb_pointer, unpacked_lun, data_length, data_dir); 1217 1218 /* only used for printk until we do TMRs */ 1219 req->se_cmd.tag = req->orb_pointer; 1220 target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1221 req->sense_buf, unpacked_lun, data_length, 1222 TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF); 1223 return; 1224 1225 err: 1226 req->status.status |= cpu_to_be32( 1227 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1228 STATUS_BLOCK_DEAD(0) | 1229 STATUS_BLOCK_LEN(1) | 1230 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1231 sbp_send_status(req); 1232 } 1233 1234 /* 1235 * DMA_TO_DEVICE = read from initiator (SCSI WRITE) 1236 * DMA_FROM_DEVICE = write to initiator (SCSI READ) 1237 */ 1238 static int sbp_rw_data(struct sbp_target_request *req) 1239 { 1240 struct sbp_session *sess = req->login->sess; 1241 int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id, 1242 generation, num_pte, length, tfr_length, 1243 rcode = RCODE_COMPLETE; 1244 struct sbp_page_table_entry *pte; 1245 unsigned long long offset; 1246 struct fw_card *card; 1247 struct sg_mapping_iter iter; 1248 1249 if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { 1250 tcode = TCODE_WRITE_BLOCK_REQUEST; 1251 sg_miter_flags = SG_MITER_FROM_SG; 1252 } else { 1253 tcode = TCODE_READ_BLOCK_REQUEST; 1254 sg_miter_flags = SG_MITER_TO_SG; 1255 } 1256 1257 max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc)); 1258 speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc)); 1259 1260 pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc)); 1261 if (pg_size) { 1262 pr_err("sbp_run_transaction: page size ignored\n"); 1263 } 1264 1265 spin_lock_bh(&sess->lock); 1266 card = fw_card_get(sess->card); 1267 node_id = sess->node_id; 1268 generation = sess->generation; 1269 spin_unlock_bh(&sess->lock); 1270 1271 if (req->pg_tbl) { 1272 pte = req->pg_tbl; 1273 num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1274 1275 offset = 0; 1276 length = 0; 1277 } else { 1278 pte = NULL; 1279 num_pte = 0; 1280 1281 offset = sbp2_pointer_to_addr(&req->orb.data_descriptor); 1282 length = req->se_cmd.data_length; 1283 } 1284 1285 sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, 1286 sg_miter_flags); 1287 1288 while (length || num_pte) { 1289 if (!length) { 1290 offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 | 1291 be32_to_cpu(pte->segment_base_lo); 1292 length = be16_to_cpu(pte->segment_length); 1293 1294 pte++; 1295 num_pte--; 1296 } 1297 1298 sg_miter_next(&iter); 1299 1300 tfr_length = min3(length, max_payload, (int)iter.length); 1301 1302 /* FIXME: take page_size into account */ 1303 1304 rcode = sbp_run_transaction(card, tcode, node_id, 1305 generation, speed, 1306 offset, iter.addr, tfr_length); 1307 1308 if (rcode != RCODE_COMPLETE) 1309 break; 1310 1311 length -= tfr_length; 1312 offset += tfr_length; 1313 iter.consumed = tfr_length; 1314 } 1315 1316 sg_miter_stop(&iter); 1317 fw_card_put(card); 1318 1319 if (rcode == RCODE_COMPLETE) { 1320 WARN_ON(length != 0); 1321 return 0; 1322 } else { 1323 return -EIO; 1324 } 1325 } 1326 1327 static int sbp_send_status(struct sbp_target_request *req) 1328 { 1329 int rc, ret = 0, length; 1330 struct sbp_login_descriptor *login = req->login; 1331 1332 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; 1333 1334 rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, 1335 login->status_fifo_addr, &req->status, length); 1336 if (rc != RCODE_COMPLETE) { 1337 pr_debug("sbp_send_status: write failed: 0x%x\n", rc); 1338 ret = -EIO; 1339 goto put_ref; 1340 } 1341 1342 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", 1343 req->orb_pointer); 1344 /* 1345 * Drop the extra ACK_KREF reference taken by target_submit_cmd() 1346 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd() 1347 * final se_cmd->cmd_kref put. 1348 */ 1349 put_ref: 1350 target_put_sess_cmd(&req->se_cmd); 1351 return ret; 1352 } 1353 1354 static void sbp_sense_mangle(struct sbp_target_request *req) 1355 { 1356 struct se_cmd *se_cmd = &req->se_cmd; 1357 u8 *sense = req->sense_buf; 1358 u8 *status = req->status.data; 1359 1360 WARN_ON(se_cmd->scsi_sense_length < 18); 1361 1362 switch (sense[0] & 0x7f) { /* sfmt */ 1363 case 0x70: /* current, fixed */ 1364 status[0] = 0 << 6; 1365 break; 1366 case 0x71: /* deferred, fixed */ 1367 status[0] = 1 << 6; 1368 break; 1369 case 0x72: /* current, descriptor */ 1370 case 0x73: /* deferred, descriptor */ 1371 default: 1372 /* 1373 * TODO: SBP-3 specifies what we should do with descriptor 1374 * format sense data 1375 */ 1376 pr_err("sbp_send_sense: unknown sense format: 0x%x\n", 1377 sense[0]); 1378 req->status.status |= cpu_to_be32( 1379 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1380 STATUS_BLOCK_DEAD(0) | 1381 STATUS_BLOCK_LEN(1) | 1382 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED)); 1383 return; 1384 } 1385 1386 status[0] |= se_cmd->scsi_status & 0x3f;/* status */ 1387 status[1] = 1388 (sense[0] & 0x80) | /* valid */ 1389 ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */ 1390 (sense[2] & 0x0f); /* sense_key */ 1391 status[2] = 0; /* XXX sense_code */ 1392 status[3] = 0; /* XXX sense_qualifier */ 1393 1394 /* information */ 1395 status[4] = sense[3]; 1396 status[5] = sense[4]; 1397 status[6] = sense[5]; 1398 status[7] = sense[6]; 1399 1400 /* CDB-dependent */ 1401 status[8] = sense[8]; 1402 status[9] = sense[9]; 1403 status[10] = sense[10]; 1404 status[11] = sense[11]; 1405 1406 /* fru */ 1407 status[12] = sense[14]; 1408 1409 /* sense_key-dependent */ 1410 status[13] = sense[15]; 1411 status[14] = sense[16]; 1412 status[15] = sense[17]; 1413 1414 req->status.status |= cpu_to_be32( 1415 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1416 STATUS_BLOCK_DEAD(0) | 1417 STATUS_BLOCK_LEN(5) | 1418 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1419 } 1420 1421 static int sbp_send_sense(struct sbp_target_request *req) 1422 { 1423 struct se_cmd *se_cmd = &req->se_cmd; 1424 1425 if (se_cmd->scsi_sense_length) { 1426 sbp_sense_mangle(req); 1427 } else { 1428 req->status.status |= cpu_to_be32( 1429 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1430 STATUS_BLOCK_DEAD(0) | 1431 STATUS_BLOCK_LEN(1) | 1432 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1433 } 1434 1435 return sbp_send_status(req); 1436 } 1437 1438 static void sbp_free_request(struct sbp_target_request *req) 1439 { 1440 struct se_cmd *se_cmd = &req->se_cmd; 1441 struct se_session *se_sess = se_cmd->se_sess; 1442 1443 kfree(req->pg_tbl); 1444 kfree(req->cmd_buf); 1445 1446 target_free_tag(se_sess, se_cmd); 1447 } 1448 1449 static void sbp_mgt_agent_process(struct work_struct *work) 1450 { 1451 struct sbp_management_agent *agent = 1452 container_of(work, struct sbp_management_agent, work); 1453 struct sbp_management_request *req = agent->request; 1454 int ret; 1455 int status_data_len = 0; 1456 1457 /* fetch the ORB from the initiator */ 1458 ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, 1459 req->node_addr, req->generation, req->speed, 1460 agent->orb_offset, &req->orb, sizeof(req->orb)); 1461 if (ret != RCODE_COMPLETE) { 1462 pr_debug("mgt_orb fetch failed: %x\n", ret); 1463 goto out; 1464 } 1465 1466 pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n", 1467 sbp2_pointer_to_addr(&req->orb.ptr1), 1468 sbp2_pointer_to_addr(&req->orb.ptr2), 1469 be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), 1470 sbp2_pointer_to_addr(&req->orb.status_fifo)); 1471 1472 if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || 1473 ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { 1474 pr_err("mgt_orb bad request\n"); 1475 goto out; 1476 } 1477 1478 switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { 1479 case MANAGEMENT_ORB_FUNCTION_LOGIN: 1480 sbp_management_request_login(agent, req, &status_data_len); 1481 break; 1482 1483 case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS: 1484 sbp_management_request_query_logins(agent, req, 1485 &status_data_len); 1486 break; 1487 1488 case MANAGEMENT_ORB_FUNCTION_RECONNECT: 1489 sbp_management_request_reconnect(agent, req, &status_data_len); 1490 break; 1491 1492 case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD: 1493 pr_notice("SET PASSWORD not implemented\n"); 1494 1495 req->status.status = cpu_to_be32( 1496 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1497 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1498 1499 break; 1500 1501 case MANAGEMENT_ORB_FUNCTION_LOGOUT: 1502 sbp_management_request_logout(agent, req, &status_data_len); 1503 break; 1504 1505 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK: 1506 pr_notice("ABORT TASK not implemented\n"); 1507 1508 req->status.status = cpu_to_be32( 1509 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1510 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1511 1512 break; 1513 1514 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET: 1515 pr_notice("ABORT TASK SET not implemented\n"); 1516 1517 req->status.status = cpu_to_be32( 1518 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1519 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1520 1521 break; 1522 1523 case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET: 1524 pr_notice("LOGICAL UNIT RESET not implemented\n"); 1525 1526 req->status.status = cpu_to_be32( 1527 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1528 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1529 1530 break; 1531 1532 case MANAGEMENT_ORB_FUNCTION_TARGET_RESET: 1533 pr_notice("TARGET RESET not implemented\n"); 1534 1535 req->status.status = cpu_to_be32( 1536 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1537 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1538 1539 break; 1540 1541 default: 1542 pr_notice("unknown management function 0x%x\n", 1543 MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); 1544 1545 req->status.status = cpu_to_be32( 1546 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1547 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1548 1549 break; 1550 } 1551 1552 req->status.status |= cpu_to_be32( 1553 STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */ 1554 STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) | 1555 STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32)); 1556 req->status.orb_low = cpu_to_be32(agent->orb_offset); 1557 1558 /* write the status block back to the initiator */ 1559 ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, 1560 req->node_addr, req->generation, req->speed, 1561 sbp2_pointer_to_addr(&req->orb.status_fifo), 1562 &req->status, 8 + status_data_len); 1563 if (ret != RCODE_COMPLETE) { 1564 pr_debug("mgt_orb status write failed: %x\n", ret); 1565 goto out; 1566 } 1567 1568 out: 1569 fw_card_put(req->card); 1570 kfree(req); 1571 1572 spin_lock_bh(&agent->lock); 1573 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1574 spin_unlock_bh(&agent->lock); 1575 } 1576 1577 static void sbp_mgt_agent_rw(struct fw_card *card, 1578 struct fw_request *request, int tcode, int destination, int source, 1579 int generation, unsigned long long offset, void *data, size_t length, 1580 void *callback_data) 1581 { 1582 struct sbp_management_agent *agent = callback_data; 1583 struct sbp2_pointer *ptr = data; 1584 int rcode = RCODE_ADDRESS_ERROR; 1585 1586 if (!agent->tport->enable) 1587 goto out; 1588 1589 if ((offset != agent->handler.offset) || (length != 8)) 1590 goto out; 1591 1592 if (tcode == TCODE_WRITE_BLOCK_REQUEST) { 1593 struct sbp_management_request *req; 1594 int prev_state; 1595 1596 spin_lock_bh(&agent->lock); 1597 prev_state = agent->state; 1598 agent->state = MANAGEMENT_AGENT_STATE_BUSY; 1599 spin_unlock_bh(&agent->lock); 1600 1601 if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) { 1602 pr_notice("ignoring management request while busy\n"); 1603 rcode = RCODE_CONFLICT_ERROR; 1604 goto out; 1605 } 1606 req = kzalloc(sizeof(*req), GFP_ATOMIC); 1607 if (!req) { 1608 rcode = RCODE_CONFLICT_ERROR; 1609 goto out; 1610 } 1611 1612 req->card = fw_card_get(card); 1613 req->generation = generation; 1614 req->node_addr = source; 1615 req->speed = fw_get_request_speed(request); 1616 1617 agent->orb_offset = sbp2_pointer_to_addr(ptr); 1618 agent->request = req; 1619 1620 queue_work(system_dfl_wq, &agent->work); 1621 rcode = RCODE_COMPLETE; 1622 } else if (tcode == TCODE_READ_BLOCK_REQUEST) { 1623 addr_to_sbp2_pointer(agent->orb_offset, ptr); 1624 rcode = RCODE_COMPLETE; 1625 } else { 1626 rcode = RCODE_TYPE_ERROR; 1627 } 1628 1629 out: 1630 fw_send_response(card, request, rcode); 1631 } 1632 1633 static struct sbp_management_agent *sbp_management_agent_register( 1634 struct sbp_tport *tport) 1635 { 1636 int ret; 1637 struct sbp_management_agent *agent; 1638 1639 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1640 if (!agent) 1641 return ERR_PTR(-ENOMEM); 1642 1643 spin_lock_init(&agent->lock); 1644 agent->tport = tport; 1645 agent->handler.length = 0x08; 1646 agent->handler.address_callback = sbp_mgt_agent_rw; 1647 agent->handler.callback_data = agent; 1648 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1649 INIT_WORK(&agent->work, sbp_mgt_agent_process); 1650 agent->orb_offset = 0; 1651 agent->request = NULL; 1652 1653 ret = fw_core_add_address_handler(&agent->handler, 1654 &sbp_register_region); 1655 if (ret < 0) { 1656 kfree(agent); 1657 return ERR_PTR(ret); 1658 } 1659 1660 return agent; 1661 } 1662 1663 static void sbp_management_agent_unregister(struct sbp_management_agent *agent) 1664 { 1665 fw_core_remove_address_handler(&agent->handler); 1666 cancel_work_sync(&agent->work); 1667 kfree(agent); 1668 } 1669 1670 static int sbp_check_true(struct se_portal_group *se_tpg) 1671 { 1672 return 1; 1673 } 1674 1675 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) 1676 { 1677 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1678 struct sbp_tport *tport = tpg->tport; 1679 1680 return &tport->tport_name[0]; 1681 } 1682 1683 static u16 sbp_get_tag(struct se_portal_group *se_tpg) 1684 { 1685 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1686 return tpg->tport_tpgt; 1687 } 1688 1689 static void sbp_release_cmd(struct se_cmd *se_cmd) 1690 { 1691 struct sbp_target_request *req = container_of(se_cmd, 1692 struct sbp_target_request, se_cmd); 1693 1694 sbp_free_request(req); 1695 } 1696 1697 static int sbp_write_pending(struct se_cmd *se_cmd) 1698 { 1699 struct sbp_target_request *req = container_of(se_cmd, 1700 struct sbp_target_request, se_cmd); 1701 int ret; 1702 1703 ret = sbp_rw_data(req); 1704 if (ret) { 1705 req->status.status |= cpu_to_be32( 1706 STATUS_BLOCK_RESP( 1707 STATUS_RESP_TRANSPORT_FAILURE) | 1708 STATUS_BLOCK_DEAD(0) | 1709 STATUS_BLOCK_LEN(1) | 1710 STATUS_BLOCK_SBP_STATUS( 1711 SBP_STATUS_UNSPECIFIED_ERROR)); 1712 sbp_send_status(req); 1713 return ret; 1714 } 1715 1716 target_execute_cmd(se_cmd); 1717 return 0; 1718 } 1719 1720 static int sbp_queue_data_in(struct se_cmd *se_cmd) 1721 { 1722 struct sbp_target_request *req = container_of(se_cmd, 1723 struct sbp_target_request, se_cmd); 1724 int ret; 1725 1726 ret = sbp_rw_data(req); 1727 if (ret) { 1728 req->status.status |= cpu_to_be32( 1729 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1730 STATUS_BLOCK_DEAD(0) | 1731 STATUS_BLOCK_LEN(1) | 1732 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1733 sbp_send_status(req); 1734 return ret; 1735 } 1736 1737 return sbp_send_sense(req); 1738 } 1739 1740 /* 1741 * Called after command (no data transfer) or after the write (to device) 1742 * operation is completed 1743 */ 1744 static int sbp_queue_status(struct se_cmd *se_cmd) 1745 { 1746 struct sbp_target_request *req = container_of(se_cmd, 1747 struct sbp_target_request, se_cmd); 1748 1749 return sbp_send_sense(req); 1750 } 1751 1752 static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) 1753 { 1754 } 1755 1756 static void sbp_aborted_task(struct se_cmd *se_cmd) 1757 { 1758 return; 1759 } 1760 1761 static int sbp_check_stop_free(struct se_cmd *se_cmd) 1762 { 1763 struct sbp_target_request *req = container_of(se_cmd, 1764 struct sbp_target_request, se_cmd); 1765 1766 return transport_generic_free_cmd(&req->se_cmd, 0); 1767 } 1768 1769 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) 1770 { 1771 struct se_lun *lun; 1772 int count = 0; 1773 1774 rcu_read_lock(); 1775 hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) 1776 count++; 1777 rcu_read_unlock(); 1778 1779 return count; 1780 } 1781 1782 static int sbp_update_unit_directory(struct sbp_tport *tport) 1783 { 1784 struct se_lun *lun; 1785 int num_luns, num_entries, idx = 0, mgt_agt_addr, ret; 1786 u32 *data; 1787 1788 if (tport->unit_directory.data) { 1789 fw_core_remove_descriptor(&tport->unit_directory); 1790 kfree(tport->unit_directory.data); 1791 tport->unit_directory.data = NULL; 1792 } 1793 1794 if (!tport->enable || !tport->tpg) 1795 return 0; 1796 1797 num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg); 1798 1799 /* 1800 * Number of entries in the final unit directory: 1801 * - all of those in the template 1802 * - management_agent 1803 * - unit_characteristics 1804 * - reconnect_timeout 1805 * - unit unique ID 1806 * - one for each LUN 1807 * 1808 * MUST NOT include leaf or sub-directory entries 1809 */ 1810 num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns; 1811 1812 if (tport->directory_id != -1) 1813 num_entries++; 1814 1815 /* allocate num_entries + 4 for the header and unique ID leaf */ 1816 data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL); 1817 if (!data) 1818 return -ENOMEM; 1819 1820 /* directory_length */ 1821 data[idx++] = num_entries << 16; 1822 1823 /* directory_id */ 1824 if (tport->directory_id != -1) 1825 data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id; 1826 1827 /* unit directory template */ 1828 memcpy(&data[idx], sbp_unit_directory_template, 1829 sizeof(sbp_unit_directory_template)); 1830 idx += ARRAY_SIZE(sbp_unit_directory_template); 1831 1832 /* management_agent */ 1833 mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4; 1834 data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff); 1835 1836 /* unit_characteristics */ 1837 data[idx++] = 0x3a000000 | 1838 (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) | 1839 SBP_ORB_FETCH_SIZE; 1840 1841 /* reconnect_timeout */ 1842 data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff); 1843 1844 /* unit unique ID (leaf is just after LUNs) */ 1845 data[idx++] = 0x8d000000 | (num_luns + 1); 1846 1847 rcu_read_lock(); 1848 hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) { 1849 struct se_device *dev; 1850 int type; 1851 /* 1852 * rcu_dereference_raw protected by se_lun->lun_group symlink 1853 * reference to se_device->dev_group. 1854 */ 1855 dev = rcu_dereference_raw(lun->lun_se_dev); 1856 type = dev->transport->get_device_type(dev); 1857 1858 /* logical_unit_number */ 1859 data[idx++] = 0x14000000 | 1860 ((type << 16) & 0x1f0000) | 1861 (lun->unpacked_lun & 0xffff); 1862 } 1863 rcu_read_unlock(); 1864 1865 /* unit unique ID leaf */ 1866 data[idx++] = 2 << 16; 1867 data[idx++] = tport->guid >> 32; 1868 data[idx++] = tport->guid; 1869 1870 tport->unit_directory.length = idx; 1871 tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24; 1872 tport->unit_directory.data = data; 1873 1874 ret = fw_core_add_descriptor(&tport->unit_directory); 1875 if (ret < 0) { 1876 kfree(tport->unit_directory.data); 1877 tport->unit_directory.data = NULL; 1878 } 1879 1880 return ret; 1881 } 1882 1883 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn) 1884 { 1885 const char *cp; 1886 char c, nibble; 1887 int pos = 0, err; 1888 1889 *wwn = 0; 1890 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) { 1891 c = *cp; 1892 if (c == '\n' && cp[1] == '\0') 1893 continue; 1894 if (c == '\0') { 1895 err = 2; 1896 if (pos != 16) 1897 goto fail; 1898 return cp - name; 1899 } 1900 err = 3; 1901 if (isdigit(c)) 1902 nibble = c - '0'; 1903 else if (isxdigit(c)) 1904 nibble = tolower(c) - 'a' + 10; 1905 else 1906 goto fail; 1907 *wwn = (*wwn << 4) | nibble; 1908 pos++; 1909 } 1910 err = 4; 1911 fail: 1912 printk(KERN_INFO "err %u len %zu pos %u\n", 1913 err, cp - name, pos); 1914 return -1; 1915 } 1916 1917 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) 1918 { 1919 return snprintf(buf, len, "%016llx", wwn); 1920 } 1921 1922 static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name) 1923 { 1924 u64 guid = 0; 1925 1926 if (sbp_parse_wwn(name, &guid) < 0) 1927 return -EINVAL; 1928 return 0; 1929 } 1930 1931 static int sbp_post_link_lun( 1932 struct se_portal_group *se_tpg, 1933 struct se_lun *se_lun) 1934 { 1935 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1936 1937 return sbp_update_unit_directory(tpg->tport); 1938 } 1939 1940 static void sbp_pre_unlink_lun( 1941 struct se_portal_group *se_tpg, 1942 struct se_lun *se_lun) 1943 { 1944 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1945 struct sbp_tport *tport = tpg->tport; 1946 int ret; 1947 1948 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) 1949 tport->enable = 0; 1950 1951 ret = sbp_update_unit_directory(tport); 1952 if (ret < 0) 1953 pr_err("unlink LUN: failed to update unit directory\n"); 1954 } 1955 1956 static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn, 1957 const char *name) 1958 { 1959 struct sbp_tport *tport = 1960 container_of(wwn, struct sbp_tport, tport_wwn); 1961 1962 struct sbp_tpg *tpg; 1963 unsigned long tpgt; 1964 int ret; 1965 1966 if (strstr(name, "tpgt_") != name) 1967 return ERR_PTR(-EINVAL); 1968 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 1969 return ERR_PTR(-EINVAL); 1970 1971 if (tport->tpg) { 1972 pr_err("Only one TPG per Unit is possible.\n"); 1973 return ERR_PTR(-EBUSY); 1974 } 1975 1976 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 1977 if (!tpg) 1978 return ERR_PTR(-ENOMEM); 1979 1980 tpg->tport = tport; 1981 tpg->tport_tpgt = tpgt; 1982 tport->tpg = tpg; 1983 1984 /* default attribute values */ 1985 tport->enable = 0; 1986 tport->directory_id = -1; 1987 tport->mgt_orb_timeout = 15; 1988 tport->max_reconnect_timeout = 5; 1989 tport->max_logins_per_lun = 1; 1990 1991 tport->mgt_agt = sbp_management_agent_register(tport); 1992 if (IS_ERR(tport->mgt_agt)) { 1993 ret = PTR_ERR(tport->mgt_agt); 1994 goto out_free_tpg; 1995 } 1996 1997 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP); 1998 if (ret < 0) 1999 goto out_unreg_mgt_agt; 2000 2001 return &tpg->se_tpg; 2002 2003 out_unreg_mgt_agt: 2004 sbp_management_agent_unregister(tport->mgt_agt); 2005 out_free_tpg: 2006 tport->tpg = NULL; 2007 kfree(tpg); 2008 return ERR_PTR(ret); 2009 } 2010 2011 static void sbp_drop_tpg(struct se_portal_group *se_tpg) 2012 { 2013 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2014 struct sbp_tport *tport = tpg->tport; 2015 2016 core_tpg_deregister(se_tpg); 2017 sbp_management_agent_unregister(tport->mgt_agt); 2018 tport->tpg = NULL; 2019 kfree(tpg); 2020 } 2021 2022 static struct se_wwn *sbp_make_tport( 2023 struct target_fabric_configfs *tf, 2024 struct config_group *group, 2025 const char *name) 2026 { 2027 struct sbp_tport *tport; 2028 u64 guid = 0; 2029 2030 if (sbp_parse_wwn(name, &guid) < 0) 2031 return ERR_PTR(-EINVAL); 2032 2033 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2034 if (!tport) 2035 return ERR_PTR(-ENOMEM); 2036 2037 tport->guid = guid; 2038 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); 2039 2040 return &tport->tport_wwn; 2041 } 2042 2043 static void sbp_drop_tport(struct se_wwn *wwn) 2044 { 2045 struct sbp_tport *tport = 2046 container_of(wwn, struct sbp_tport, tport_wwn); 2047 2048 kfree(tport); 2049 } 2050 2051 static ssize_t sbp_wwn_version_show(struct config_item *item, char *page) 2052 { 2053 return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION); 2054 } 2055 2056 CONFIGFS_ATTR_RO(sbp_wwn_, version); 2057 2058 static struct configfs_attribute *sbp_wwn_attrs[] = { 2059 &sbp_wwn_attr_version, 2060 NULL, 2061 }; 2062 2063 static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page) 2064 { 2065 struct se_portal_group *se_tpg = to_tpg(item); 2066 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2067 struct sbp_tport *tport = tpg->tport; 2068 2069 if (tport->directory_id == -1) 2070 return sprintf(page, "implicit\n"); 2071 else 2072 return sprintf(page, "%06x\n", tport->directory_id); 2073 } 2074 2075 static ssize_t sbp_tpg_directory_id_store(struct config_item *item, 2076 const char *page, size_t count) 2077 { 2078 struct se_portal_group *se_tpg = to_tpg(item); 2079 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2080 struct sbp_tport *tport = tpg->tport; 2081 unsigned long val; 2082 2083 if (tport->enable) { 2084 pr_err("Cannot change the directory_id on an active target.\n"); 2085 return -EBUSY; 2086 } 2087 2088 if (strstr(page, "implicit") == page) { 2089 tport->directory_id = -1; 2090 } else { 2091 if (kstrtoul(page, 16, &val) < 0) 2092 return -EINVAL; 2093 if (val > 0xffffff) 2094 return -EINVAL; 2095 2096 tport->directory_id = val; 2097 } 2098 2099 return count; 2100 } 2101 2102 static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable) 2103 { 2104 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2105 struct sbp_tport *tport = tpg->tport; 2106 int ret; 2107 2108 if (enable) { 2109 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { 2110 pr_err("Cannot enable a target with no LUNs!\n"); 2111 return -EINVAL; 2112 } 2113 } else { 2114 /* XXX: force-shutdown sessions instead? */ 2115 spin_lock_bh(&se_tpg->session_lock); 2116 if (!list_empty(&se_tpg->tpg_sess_list)) { 2117 spin_unlock_bh(&se_tpg->session_lock); 2118 return -EBUSY; 2119 } 2120 spin_unlock_bh(&se_tpg->session_lock); 2121 } 2122 2123 tport->enable = enable; 2124 2125 ret = sbp_update_unit_directory(tport); 2126 if (ret < 0) { 2127 pr_err("Could not update Config ROM\n"); 2128 return ret; 2129 } 2130 2131 return 0; 2132 } 2133 2134 CONFIGFS_ATTR(sbp_tpg_, directory_id); 2135 2136 static struct configfs_attribute *sbp_tpg_base_attrs[] = { 2137 &sbp_tpg_attr_directory_id, 2138 NULL, 2139 }; 2140 2141 static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item, 2142 char *page) 2143 { 2144 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2145 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2146 struct sbp_tport *tport = tpg->tport; 2147 return sprintf(page, "%d\n", tport->mgt_orb_timeout); 2148 } 2149 2150 static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item, 2151 const char *page, size_t count) 2152 { 2153 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2154 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2155 struct sbp_tport *tport = tpg->tport; 2156 unsigned long val; 2157 int ret; 2158 2159 if (kstrtoul(page, 0, &val) < 0) 2160 return -EINVAL; 2161 if ((val < 1) || (val > 127)) 2162 return -EINVAL; 2163 2164 if (tport->mgt_orb_timeout == val) 2165 return count; 2166 2167 tport->mgt_orb_timeout = val; 2168 2169 ret = sbp_update_unit_directory(tport); 2170 if (ret < 0) 2171 return ret; 2172 2173 return count; 2174 } 2175 2176 static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item, 2177 char *page) 2178 { 2179 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2180 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2181 struct sbp_tport *tport = tpg->tport; 2182 return sprintf(page, "%d\n", tport->max_reconnect_timeout); 2183 } 2184 2185 static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item, 2186 const char *page, size_t count) 2187 { 2188 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2189 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2190 struct sbp_tport *tport = tpg->tport; 2191 unsigned long val; 2192 int ret; 2193 2194 if (kstrtoul(page, 0, &val) < 0) 2195 return -EINVAL; 2196 if ((val < 1) || (val > 32767)) 2197 return -EINVAL; 2198 2199 if (tport->max_reconnect_timeout == val) 2200 return count; 2201 2202 tport->max_reconnect_timeout = val; 2203 2204 ret = sbp_update_unit_directory(tport); 2205 if (ret < 0) 2206 return ret; 2207 2208 return count; 2209 } 2210 2211 static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item, 2212 char *page) 2213 { 2214 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2215 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2216 struct sbp_tport *tport = tpg->tport; 2217 return sprintf(page, "%d\n", tport->max_logins_per_lun); 2218 } 2219 2220 static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item, 2221 const char *page, size_t count) 2222 { 2223 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2224 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2225 struct sbp_tport *tport = tpg->tport; 2226 unsigned long val; 2227 2228 if (kstrtoul(page, 0, &val) < 0) 2229 return -EINVAL; 2230 if ((val < 1) || (val > 127)) 2231 return -EINVAL; 2232 2233 /* XXX: also check against current count? */ 2234 2235 tport->max_logins_per_lun = val; 2236 2237 return count; 2238 } 2239 2240 CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout); 2241 CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout); 2242 CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun); 2243 2244 static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { 2245 &sbp_tpg_attrib_attr_mgt_orb_timeout, 2246 &sbp_tpg_attrib_attr_max_reconnect_timeout, 2247 &sbp_tpg_attrib_attr_max_logins_per_lun, 2248 NULL, 2249 }; 2250 2251 static const struct target_core_fabric_ops sbp_ops = { 2252 .module = THIS_MODULE, 2253 .fabric_name = "sbp", 2254 .tpg_get_wwn = sbp_get_fabric_wwn, 2255 .tpg_get_tag = sbp_get_tag, 2256 .tpg_check_demo_mode = sbp_check_true, 2257 .tpg_check_demo_mode_cache = sbp_check_true, 2258 .release_cmd = sbp_release_cmd, 2259 .write_pending = sbp_write_pending, 2260 .queue_data_in = sbp_queue_data_in, 2261 .queue_status = sbp_queue_status, 2262 .queue_tm_rsp = sbp_queue_tm_rsp, 2263 .aborted_task = sbp_aborted_task, 2264 .check_stop_free = sbp_check_stop_free, 2265 2266 .fabric_make_wwn = sbp_make_tport, 2267 .fabric_drop_wwn = sbp_drop_tport, 2268 .fabric_make_tpg = sbp_make_tpg, 2269 .fabric_enable_tpg = sbp_enable_tpg, 2270 .fabric_drop_tpg = sbp_drop_tpg, 2271 .fabric_post_link = sbp_post_link_lun, 2272 .fabric_pre_unlink = sbp_pre_unlink_lun, 2273 .fabric_make_np = NULL, 2274 .fabric_drop_np = NULL, 2275 .fabric_init_nodeacl = sbp_init_nodeacl, 2276 2277 .tfc_wwn_attrs = sbp_wwn_attrs, 2278 .tfc_tpg_base_attrs = sbp_tpg_base_attrs, 2279 .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, 2280 2281 .default_submit_type = TARGET_DIRECT_SUBMIT, 2282 .direct_submit_supp = 1, 2283 }; 2284 2285 static int __init sbp_init(void) 2286 { 2287 return target_register_template(&sbp_ops); 2288 }; 2289 2290 static void __exit sbp_exit(void) 2291 { 2292 target_unregister_template(&sbp_ops); 2293 }; 2294 2295 MODULE_DESCRIPTION("FireWire SBP fabric driver"); 2296 MODULE_LICENSE("GPL"); 2297 module_init(sbp_init); 2298 module_exit(sbp_exit); 2299