1 /* 2 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/module.h> 34 #include <linux/init.h> 35 #include <linux/slab.h> 36 #include <linux/err.h> 37 #include <linux/string.h> 38 #include <linux/parser.h> 39 #include <linux/random.h> 40 #include <linux/jiffies.h> 41 42 #include <asm/atomic.h> 43 44 #include <scsi/scsi.h> 45 #include <scsi/scsi_device.h> 46 #include <scsi/scsi_dbg.h> 47 #include <scsi/srp.h> 48 #include <scsi/scsi_transport_srp.h> 49 50 #include "ib_srp.h" 51 52 #define DRV_NAME "ib_srp" 53 #define PFX DRV_NAME ": " 54 #define DRV_VERSION "0.2" 55 #define DRV_RELDATE "November 1, 2005" 56 57 MODULE_AUTHOR("Roland Dreier"); 58 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " 59 "v" DRV_VERSION " (" DRV_RELDATE ")"); 60 MODULE_LICENSE("Dual BSD/GPL"); 61 62 static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE; 63 static int srp_max_iu_len; 64 65 module_param(srp_sg_tablesize, int, 0444); 66 MODULE_PARM_DESC(srp_sg_tablesize, 67 "Max number of gather/scatter entries per I/O (default is 12, max 255)"); 68 69 static int topspin_workarounds = 1; 70 71 module_param(topspin_workarounds, int, 0444); 72 MODULE_PARM_DESC(topspin_workarounds, 73 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); 74 75 static int mellanox_workarounds = 1; 76 77 module_param(mellanox_workarounds, int, 0444); 78 MODULE_PARM_DESC(mellanox_workarounds, 79 "Enable workarounds for Mellanox SRP target bugs if != 0"); 80 81 static void srp_add_one(struct ib_device *device); 82 static void srp_remove_one(struct ib_device *device); 83 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); 84 static void srp_send_completion(struct ib_cq *cq, void *target_ptr); 85 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 86 87 static struct scsi_transport_template *ib_srp_transport_template; 88 89 static struct ib_client srp_client = { 90 .name = "srp", 91 .add = srp_add_one, 92 .remove = srp_remove_one 93 }; 94 95 static struct ib_sa_client srp_sa_client; 96 97 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) 98 { 99 return (struct srp_target_port *) host->hostdata; 100 } 101 102 static const char *srp_target_info(struct Scsi_Host *host) 103 { 104 return host_to_target(host)->target_name; 105 } 106 107 static int srp_target_is_topspin(struct srp_target_port *target) 108 { 109 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad }; 110 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d }; 111 112 return topspin_workarounds && 113 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || 114 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); 115 } 116 117 static int srp_target_is_mellanox(struct srp_target_port *target) 118 { 119 static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 }; 120 121 return mellanox_workarounds && 122 !memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui); 123 } 124 125 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, 126 gfp_t gfp_mask, 127 enum dma_data_direction direction) 128 { 129 struct srp_iu *iu; 130 131 iu = kmalloc(sizeof *iu, gfp_mask); 132 if (!iu) 133 goto out; 134 135 iu->buf = kzalloc(size, gfp_mask); 136 if (!iu->buf) 137 goto out_free_iu; 138 139 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, 140 direction); 141 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) 142 goto out_free_buf; 143 144 iu->size = size; 145 iu->direction = direction; 146 147 return iu; 148 149 out_free_buf: 150 kfree(iu->buf); 151 out_free_iu: 152 kfree(iu); 153 out: 154 return NULL; 155 } 156 157 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) 158 { 159 if (!iu) 160 return; 161 162 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, 163 iu->direction); 164 kfree(iu->buf); 165 kfree(iu); 166 } 167 168 static void srp_qp_event(struct ib_event *event, void *context) 169 { 170 printk(KERN_ERR PFX "QP event %d\n", event->event); 171 } 172 173 static int srp_init_qp(struct srp_target_port *target, 174 struct ib_qp *qp) 175 { 176 struct ib_qp_attr *attr; 177 int ret; 178 179 attr = kmalloc(sizeof *attr, GFP_KERNEL); 180 if (!attr) 181 return -ENOMEM; 182 183 ret = ib_find_pkey(target->srp_host->srp_dev->dev, 184 target->srp_host->port, 185 be16_to_cpu(target->path.pkey), 186 &attr->pkey_index); 187 if (ret) 188 goto out; 189 190 attr->qp_state = IB_QPS_INIT; 191 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | 192 IB_ACCESS_REMOTE_WRITE); 193 attr->port_num = target->srp_host->port; 194 195 ret = ib_modify_qp(qp, attr, 196 IB_QP_STATE | 197 IB_QP_PKEY_INDEX | 198 IB_QP_ACCESS_FLAGS | 199 IB_QP_PORT); 200 201 out: 202 kfree(attr); 203 return ret; 204 } 205 206 static int srp_new_cm_id(struct srp_target_port *target) 207 { 208 struct ib_cm_id *new_cm_id; 209 210 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, 211 srp_cm_handler, target); 212 if (IS_ERR(new_cm_id)) 213 return PTR_ERR(new_cm_id); 214 215 if (target->cm_id) 216 ib_destroy_cm_id(target->cm_id); 217 target->cm_id = new_cm_id; 218 219 return 0; 220 } 221 222 static int srp_create_target_ib(struct srp_target_port *target) 223 { 224 struct ib_qp_init_attr *init_attr; 225 int ret; 226 227 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); 228 if (!init_attr) 229 return -ENOMEM; 230 231 target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, 232 srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0); 233 if (IS_ERR(target->recv_cq)) { 234 ret = PTR_ERR(target->recv_cq); 235 goto err; 236 } 237 238 target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev, 239 srp_send_completion, NULL, target, SRP_SQ_SIZE, 0); 240 if (IS_ERR(target->send_cq)) { 241 ret = PTR_ERR(target->send_cq); 242 goto err_recv_cq; 243 } 244 245 ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP); 246 247 init_attr->event_handler = srp_qp_event; 248 init_attr->cap.max_send_wr = SRP_SQ_SIZE; 249 init_attr->cap.max_recv_wr = SRP_RQ_SIZE; 250 init_attr->cap.max_recv_sge = 1; 251 init_attr->cap.max_send_sge = 1; 252 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 253 init_attr->qp_type = IB_QPT_RC; 254 init_attr->send_cq = target->send_cq; 255 init_attr->recv_cq = target->recv_cq; 256 257 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); 258 if (IS_ERR(target->qp)) { 259 ret = PTR_ERR(target->qp); 260 goto err_send_cq; 261 } 262 263 ret = srp_init_qp(target, target->qp); 264 if (ret) 265 goto err_qp; 266 267 kfree(init_attr); 268 return 0; 269 270 err_qp: 271 ib_destroy_qp(target->qp); 272 273 err_send_cq: 274 ib_destroy_cq(target->send_cq); 275 276 err_recv_cq: 277 ib_destroy_cq(target->recv_cq); 278 279 err: 280 kfree(init_attr); 281 return ret; 282 } 283 284 static void srp_free_target_ib(struct srp_target_port *target) 285 { 286 int i; 287 288 ib_destroy_qp(target->qp); 289 ib_destroy_cq(target->send_cq); 290 ib_destroy_cq(target->recv_cq); 291 292 for (i = 0; i < SRP_RQ_SIZE; ++i) 293 srp_free_iu(target->srp_host, target->rx_ring[i]); 294 for (i = 0; i < SRP_SQ_SIZE + 1; ++i) 295 srp_free_iu(target->srp_host, target->tx_ring[i]); 296 } 297 298 static void srp_path_rec_completion(int status, 299 struct ib_sa_path_rec *pathrec, 300 void *target_ptr) 301 { 302 struct srp_target_port *target = target_ptr; 303 304 target->status = status; 305 if (status) 306 shost_printk(KERN_ERR, target->scsi_host, 307 PFX "Got failed path rec status %d\n", status); 308 else 309 target->path = *pathrec; 310 complete(&target->done); 311 } 312 313 static int srp_lookup_path(struct srp_target_port *target) 314 { 315 target->path.numb_path = 1; 316 317 init_completion(&target->done); 318 319 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, 320 target->srp_host->srp_dev->dev, 321 target->srp_host->port, 322 &target->path, 323 IB_SA_PATH_REC_SERVICE_ID | 324 IB_SA_PATH_REC_DGID | 325 IB_SA_PATH_REC_SGID | 326 IB_SA_PATH_REC_NUMB_PATH | 327 IB_SA_PATH_REC_PKEY, 328 SRP_PATH_REC_TIMEOUT_MS, 329 GFP_KERNEL, 330 srp_path_rec_completion, 331 target, &target->path_query); 332 if (target->path_query_id < 0) 333 return target->path_query_id; 334 335 wait_for_completion(&target->done); 336 337 if (target->status < 0) 338 shost_printk(KERN_WARNING, target->scsi_host, 339 PFX "Path record query failed\n"); 340 341 return target->status; 342 } 343 344 static int srp_send_req(struct srp_target_port *target) 345 { 346 struct { 347 struct ib_cm_req_param param; 348 struct srp_login_req priv; 349 } *req = NULL; 350 int status; 351 352 req = kzalloc(sizeof *req, GFP_KERNEL); 353 if (!req) 354 return -ENOMEM; 355 356 req->param.primary_path = &target->path; 357 req->param.alternate_path = NULL; 358 req->param.service_id = target->service_id; 359 req->param.qp_num = target->qp->qp_num; 360 req->param.qp_type = target->qp->qp_type; 361 req->param.private_data = &req->priv; 362 req->param.private_data_len = sizeof req->priv; 363 req->param.flow_control = 1; 364 365 get_random_bytes(&req->param.starting_psn, 4); 366 req->param.starting_psn &= 0xffffff; 367 368 /* 369 * Pick some arbitrary defaults here; we could make these 370 * module parameters if anyone cared about setting them. 371 */ 372 req->param.responder_resources = 4; 373 req->param.remote_cm_response_timeout = 20; 374 req->param.local_cm_response_timeout = 20; 375 req->param.retry_count = 7; 376 req->param.rnr_retry_count = 7; 377 req->param.max_cm_retries = 15; 378 379 req->priv.opcode = SRP_LOGIN_REQ; 380 req->priv.tag = 0; 381 req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len); 382 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 383 SRP_BUF_FORMAT_INDIRECT); 384 /* 385 * In the published SRP specification (draft rev. 16a), the 386 * port identifier format is 8 bytes of ID extension followed 387 * by 8 bytes of GUID. Older drafts put the two halves in the 388 * opposite order, so that the GUID comes first. 389 * 390 * Targets conforming to these obsolete drafts can be 391 * recognized by the I/O Class they report. 392 */ 393 if (target->io_class == SRP_REV10_IB_IO_CLASS) { 394 memcpy(req->priv.initiator_port_id, 395 &target->path.sgid.global.interface_id, 8); 396 memcpy(req->priv.initiator_port_id + 8, 397 &target->initiator_ext, 8); 398 memcpy(req->priv.target_port_id, &target->ioc_guid, 8); 399 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8); 400 } else { 401 memcpy(req->priv.initiator_port_id, 402 &target->initiator_ext, 8); 403 memcpy(req->priv.initiator_port_id + 8, 404 &target->path.sgid.global.interface_id, 8); 405 memcpy(req->priv.target_port_id, &target->id_ext, 8); 406 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); 407 } 408 409 /* 410 * Topspin/Cisco SRP targets will reject our login unless we 411 * zero out the first 8 bytes of our initiator port ID and set 412 * the second 8 bytes to the local node GUID. 413 */ 414 if (srp_target_is_topspin(target)) { 415 shost_printk(KERN_DEBUG, target->scsi_host, 416 PFX "Topspin/Cisco initiator port ID workaround " 417 "activated for target GUID %016llx\n", 418 (unsigned long long) be64_to_cpu(target->ioc_guid)); 419 memset(req->priv.initiator_port_id, 0, 8); 420 memcpy(req->priv.initiator_port_id + 8, 421 &target->srp_host->srp_dev->dev->node_guid, 8); 422 } 423 424 status = ib_send_cm_req(target->cm_id, &req->param); 425 426 kfree(req); 427 428 return status; 429 } 430 431 static void srp_disconnect_target(struct srp_target_port *target) 432 { 433 /* XXX should send SRP_I_LOGOUT request */ 434 435 init_completion(&target->done); 436 if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { 437 shost_printk(KERN_DEBUG, target->scsi_host, 438 PFX "Sending CM DREQ failed\n"); 439 return; 440 } 441 wait_for_completion(&target->done); 442 } 443 444 static void srp_remove_work(struct work_struct *work) 445 { 446 struct srp_target_port *target = 447 container_of(work, struct srp_target_port, work); 448 449 spin_lock_irq(target->scsi_host->host_lock); 450 if (target->state != SRP_TARGET_DEAD) { 451 spin_unlock_irq(target->scsi_host->host_lock); 452 return; 453 } 454 target->state = SRP_TARGET_REMOVED; 455 spin_unlock_irq(target->scsi_host->host_lock); 456 457 spin_lock(&target->srp_host->target_lock); 458 list_del(&target->list); 459 spin_unlock(&target->srp_host->target_lock); 460 461 srp_remove_host(target->scsi_host); 462 scsi_remove_host(target->scsi_host); 463 ib_destroy_cm_id(target->cm_id); 464 srp_free_target_ib(target); 465 scsi_host_put(target->scsi_host); 466 } 467 468 static int srp_connect_target(struct srp_target_port *target) 469 { 470 int retries = 3; 471 int ret; 472 473 ret = srp_lookup_path(target); 474 if (ret) 475 return ret; 476 477 while (1) { 478 init_completion(&target->done); 479 ret = srp_send_req(target); 480 if (ret) 481 return ret; 482 wait_for_completion(&target->done); 483 484 /* 485 * The CM event handling code will set status to 486 * SRP_PORT_REDIRECT if we get a port redirect REJ 487 * back, or SRP_DLID_REDIRECT if we get a lid/qp 488 * redirect REJ back. 489 */ 490 switch (target->status) { 491 case 0: 492 return 0; 493 494 case SRP_PORT_REDIRECT: 495 ret = srp_lookup_path(target); 496 if (ret) 497 return ret; 498 break; 499 500 case SRP_DLID_REDIRECT: 501 break; 502 503 case SRP_STALE_CONN: 504 /* Our current CM id was stale, and is now in timewait. 505 * Try to reconnect with a new one. 506 */ 507 if (!retries-- || srp_new_cm_id(target)) { 508 shost_printk(KERN_ERR, target->scsi_host, PFX 509 "giving up on stale connection\n"); 510 target->status = -ECONNRESET; 511 return target->status; 512 } 513 514 shost_printk(KERN_ERR, target->scsi_host, PFX 515 "retrying stale connection\n"); 516 break; 517 518 default: 519 return target->status; 520 } 521 } 522 } 523 524 static void srp_unmap_data(struct scsi_cmnd *scmnd, 525 struct srp_target_port *target, 526 struct srp_request *req) 527 { 528 if (!scsi_sglist(scmnd) || 529 (scmnd->sc_data_direction != DMA_TO_DEVICE && 530 scmnd->sc_data_direction != DMA_FROM_DEVICE)) 531 return; 532 533 if (req->fmr) { 534 ib_fmr_pool_unmap(req->fmr); 535 req->fmr = NULL; 536 } 537 538 ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd), 539 scsi_sg_count(scmnd), scmnd->sc_data_direction); 540 } 541 542 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 543 { 544 srp_unmap_data(req->scmnd, target, req); 545 list_move_tail(&req->list, &target->free_reqs); 546 } 547 548 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) 549 { 550 req->scmnd->result = DID_RESET << 16; 551 req->scmnd->scsi_done(req->scmnd); 552 srp_remove_req(target, req); 553 } 554 555 static int srp_reconnect_target(struct srp_target_port *target) 556 { 557 struct ib_qp_attr qp_attr; 558 struct srp_request *req, *tmp; 559 struct ib_wc wc; 560 int ret; 561 562 spin_lock_irq(target->scsi_host->host_lock); 563 if (target->state != SRP_TARGET_LIVE) { 564 spin_unlock_irq(target->scsi_host->host_lock); 565 return -EAGAIN; 566 } 567 target->state = SRP_TARGET_CONNECTING; 568 spin_unlock_irq(target->scsi_host->host_lock); 569 570 srp_disconnect_target(target); 571 /* 572 * Now get a new local CM ID so that we avoid confusing the 573 * target in case things are really fouled up. 574 */ 575 ret = srp_new_cm_id(target); 576 if (ret) 577 goto err; 578 579 qp_attr.qp_state = IB_QPS_RESET; 580 ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE); 581 if (ret) 582 goto err; 583 584 ret = srp_init_qp(target, target->qp); 585 if (ret) 586 goto err; 587 588 while (ib_poll_cq(target->recv_cq, 1, &wc) > 0) 589 ; /* nothing */ 590 while (ib_poll_cq(target->send_cq, 1, &wc) > 0) 591 ; /* nothing */ 592 593 spin_lock_irq(target->scsi_host->host_lock); 594 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 595 srp_reset_req(target, req); 596 spin_unlock_irq(target->scsi_host->host_lock); 597 598 target->rx_head = 0; 599 target->tx_head = 0; 600 target->tx_tail = 0; 601 602 target->qp_in_error = 0; 603 ret = srp_connect_target(target); 604 if (ret) 605 goto err; 606 607 spin_lock_irq(target->scsi_host->host_lock); 608 if (target->state == SRP_TARGET_CONNECTING) { 609 ret = 0; 610 target->state = SRP_TARGET_LIVE; 611 } else 612 ret = -EAGAIN; 613 spin_unlock_irq(target->scsi_host->host_lock); 614 615 return ret; 616 617 err: 618 shost_printk(KERN_ERR, target->scsi_host, 619 PFX "reconnect failed (%d), removing target port.\n", ret); 620 621 /* 622 * We couldn't reconnect, so kill our target port off. 623 * However, we have to defer the real removal because we might 624 * be in the context of the SCSI error handler now, which 625 * would deadlock if we call scsi_remove_host(). 626 */ 627 spin_lock_irq(target->scsi_host->host_lock); 628 if (target->state == SRP_TARGET_CONNECTING) { 629 target->state = SRP_TARGET_DEAD; 630 INIT_WORK(&target->work, srp_remove_work); 631 schedule_work(&target->work); 632 } 633 spin_unlock_irq(target->scsi_host->host_lock); 634 635 return ret; 636 } 637 638 static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, 639 int sg_cnt, struct srp_request *req, 640 struct srp_direct_buf *buf) 641 { 642 u64 io_addr = 0; 643 u64 *dma_pages; 644 u32 len; 645 int page_cnt; 646 int i, j; 647 int ret; 648 struct srp_device *dev = target->srp_host->srp_dev; 649 struct ib_device *ibdev = dev->dev; 650 struct scatterlist *sg; 651 652 if (!dev->fmr_pool) 653 return -ENODEV; 654 655 if (srp_target_is_mellanox(target) && 656 (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask)) 657 return -EINVAL; 658 659 len = page_cnt = 0; 660 scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { 661 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 662 663 if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) { 664 if (i > 0) 665 return -EINVAL; 666 else 667 ++page_cnt; 668 } 669 if ((ib_sg_dma_address(ibdev, sg) + dma_len) & 670 ~dev->fmr_page_mask) { 671 if (i < sg_cnt - 1) 672 return -EINVAL; 673 else 674 ++page_cnt; 675 } 676 677 len += dma_len; 678 } 679 680 page_cnt += len >> dev->fmr_page_shift; 681 if (page_cnt > SRP_FMR_SIZE) 682 return -ENOMEM; 683 684 dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC); 685 if (!dma_pages) 686 return -ENOMEM; 687 688 page_cnt = 0; 689 scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) { 690 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 691 692 for (j = 0; j < dma_len; j += dev->fmr_page_size) 693 dma_pages[page_cnt++] = 694 (ib_sg_dma_address(ibdev, sg) & 695 dev->fmr_page_mask) + j; 696 } 697 698 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 699 dma_pages, page_cnt, io_addr); 700 if (IS_ERR(req->fmr)) { 701 ret = PTR_ERR(req->fmr); 702 req->fmr = NULL; 703 goto out; 704 } 705 706 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) & 707 ~dev->fmr_page_mask); 708 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 709 buf->len = cpu_to_be32(len); 710 711 ret = 0; 712 713 out: 714 kfree(dma_pages); 715 716 return ret; 717 } 718 719 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, 720 struct srp_request *req) 721 { 722 struct scatterlist *scat; 723 struct srp_cmd *cmd = req->cmd->buf; 724 int len, nents, count; 725 u8 fmt = SRP_DATA_DESC_DIRECT; 726 struct srp_device *dev; 727 struct ib_device *ibdev; 728 729 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 730 return sizeof (struct srp_cmd); 731 732 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 733 scmnd->sc_data_direction != DMA_TO_DEVICE) { 734 shost_printk(KERN_WARNING, target->scsi_host, 735 PFX "Unhandled data direction %d\n", 736 scmnd->sc_data_direction); 737 return -EINVAL; 738 } 739 740 nents = scsi_sg_count(scmnd); 741 scat = scsi_sglist(scmnd); 742 743 dev = target->srp_host->srp_dev; 744 ibdev = dev->dev; 745 746 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 747 748 fmt = SRP_DATA_DESC_DIRECT; 749 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 750 751 if (count == 1) { 752 /* 753 * The midlayer only generated a single gather/scatter 754 * entry, or DMA mapping coalesced everything to a 755 * single entry. So a direct descriptor along with 756 * the DMA MR suffices. 757 */ 758 struct srp_direct_buf *buf = (void *) cmd->add_data; 759 760 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 761 buf->key = cpu_to_be32(dev->mr->rkey); 762 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 763 } else if (srp_map_fmr(target, scat, count, req, 764 (void *) cmd->add_data)) { 765 /* 766 * FMR mapping failed, and the scatterlist has more 767 * than one entry. Generate an indirect memory 768 * descriptor. 769 */ 770 struct srp_indirect_buf *buf = (void *) cmd->add_data; 771 struct scatterlist *sg; 772 u32 datalen = 0; 773 int i; 774 775 fmt = SRP_DATA_DESC_INDIRECT; 776 len = sizeof (struct srp_cmd) + 777 sizeof (struct srp_indirect_buf) + 778 count * sizeof (struct srp_direct_buf); 779 780 scsi_for_each_sg(scmnd, sg, count, i) { 781 unsigned int dma_len = ib_sg_dma_len(ibdev, sg); 782 783 buf->desc_list[i].va = 784 cpu_to_be64(ib_sg_dma_address(ibdev, sg)); 785 buf->desc_list[i].key = 786 cpu_to_be32(dev->mr->rkey); 787 buf->desc_list[i].len = cpu_to_be32(dma_len); 788 datalen += dma_len; 789 } 790 791 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 792 cmd->data_out_desc_cnt = count; 793 else 794 cmd->data_in_desc_cnt = count; 795 796 buf->table_desc.va = 797 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); 798 buf->table_desc.key = 799 cpu_to_be32(target->srp_host->srp_dev->mr->rkey); 800 buf->table_desc.len = 801 cpu_to_be32(count * sizeof (struct srp_direct_buf)); 802 803 buf->len = cpu_to_be32(datalen); 804 } 805 806 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 807 cmd->buf_fmt = fmt << 4; 808 else 809 cmd->buf_fmt = fmt; 810 811 return len; 812 } 813 814 static int srp_post_recv(struct srp_target_port *target) 815 { 816 unsigned long flags; 817 struct srp_iu *iu; 818 struct ib_sge list; 819 struct ib_recv_wr wr, *bad_wr; 820 unsigned int next; 821 int ret; 822 823 spin_lock_irqsave(target->scsi_host->host_lock, flags); 824 825 next = target->rx_head & (SRP_RQ_SIZE - 1); 826 wr.wr_id = next; 827 iu = target->rx_ring[next]; 828 829 list.addr = iu->dma; 830 list.length = iu->size; 831 list.lkey = target->srp_host->srp_dev->mr->lkey; 832 833 wr.next = NULL; 834 wr.sg_list = &list; 835 wr.num_sge = 1; 836 837 ret = ib_post_recv(target->qp, &wr, &bad_wr); 838 if (!ret) 839 ++target->rx_head; 840 841 spin_unlock_irqrestore(target->scsi_host->host_lock, flags); 842 843 return ret; 844 } 845 846 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 847 { 848 struct srp_request *req; 849 struct scsi_cmnd *scmnd; 850 unsigned long flags; 851 s32 delta; 852 853 delta = (s32) be32_to_cpu(rsp->req_lim_delta); 854 855 spin_lock_irqsave(target->scsi_host->host_lock, flags); 856 857 target->req_lim += delta; 858 859 req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT]; 860 861 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 862 if (be32_to_cpu(rsp->resp_data_len) < 4) 863 req->tsk_status = -1; 864 else 865 req->tsk_status = rsp->data[3]; 866 complete(&req->done); 867 } else { 868 scmnd = req->scmnd; 869 if (!scmnd) 870 shost_printk(KERN_ERR, target->scsi_host, 871 "Null scmnd for RSP w/tag %016llx\n", 872 (unsigned long long) rsp->tag); 873 scmnd->result = rsp->status; 874 875 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 876 memcpy(scmnd->sense_buffer, rsp->data + 877 be32_to_cpu(rsp->resp_data_len), 878 min_t(int, be32_to_cpu(rsp->sense_data_len), 879 SCSI_SENSE_BUFFERSIZE)); 880 } 881 882 if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) 883 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 884 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 885 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 886 887 if (!req->tsk_mgmt) { 888 scmnd->host_scribble = (void *) -1L; 889 scmnd->scsi_done(scmnd); 890 891 srp_remove_req(target, req); 892 } else 893 req->cmd_done = 1; 894 } 895 896 spin_unlock_irqrestore(target->scsi_host->host_lock, flags); 897 } 898 899 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 900 { 901 struct ib_device *dev; 902 struct srp_iu *iu; 903 int res; 904 u8 opcode; 905 906 iu = target->rx_ring[wc->wr_id]; 907 908 dev = target->srp_host->srp_dev->dev; 909 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 910 DMA_FROM_DEVICE); 911 912 opcode = *(u8 *) iu->buf; 913 914 if (0) { 915 shost_printk(KERN_ERR, target->scsi_host, 916 PFX "recv completion, opcode 0x%02x\n", opcode); 917 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 918 iu->buf, wc->byte_len, true); 919 } 920 921 switch (opcode) { 922 case SRP_RSP: 923 srp_process_rsp(target, iu->buf); 924 break; 925 926 case SRP_T_LOGOUT: 927 /* XXX Handle target logout */ 928 shost_printk(KERN_WARNING, target->scsi_host, 929 PFX "Got target logout request\n"); 930 break; 931 932 default: 933 shost_printk(KERN_WARNING, target->scsi_host, 934 PFX "Unhandled SRP opcode 0x%02x\n", opcode); 935 break; 936 } 937 938 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 939 DMA_FROM_DEVICE); 940 941 res = srp_post_recv(target); 942 if (res != 0) 943 shost_printk(KERN_ERR, target->scsi_host, 944 PFX "Recv failed with error code %d\n", res); 945 } 946 947 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) 948 { 949 struct srp_target_port *target = target_ptr; 950 struct ib_wc wc; 951 952 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 953 while (ib_poll_cq(cq, 1, &wc) > 0) { 954 if (wc.status) { 955 shost_printk(KERN_ERR, target->scsi_host, 956 PFX "failed receive status %d\n", 957 wc.status); 958 target->qp_in_error = 1; 959 break; 960 } 961 962 srp_handle_recv(target, &wc); 963 } 964 } 965 966 static void srp_send_completion(struct ib_cq *cq, void *target_ptr) 967 { 968 struct srp_target_port *target = target_ptr; 969 struct ib_wc wc; 970 971 while (ib_poll_cq(cq, 1, &wc) > 0) { 972 if (wc.status) { 973 shost_printk(KERN_ERR, target->scsi_host, 974 PFX "failed send status %d\n", 975 wc.status); 976 target->qp_in_error = 1; 977 break; 978 } 979 980 ++target->tx_tail; 981 } 982 } 983 984 /* 985 * Must be called with target->scsi_host->host_lock held to protect 986 * req_lim and tx_head. Lock cannot be dropped between call here and 987 * call to __srp_post_send(). 988 */ 989 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, 990 enum srp_request_type req_type) 991 { 992 s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; 993 994 srp_send_completion(target->send_cq, target); 995 996 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 997 return NULL; 998 999 if (target->req_lim < min) { 1000 ++target->zero_req_lim; 1001 return NULL; 1002 } 1003 1004 return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; 1005 } 1006 1007 /* 1008 * Must be called with target->scsi_host->host_lock held to protect 1009 * req_lim and tx_head. 1010 */ 1011 static int __srp_post_send(struct srp_target_port *target, 1012 struct srp_iu *iu, int len) 1013 { 1014 struct ib_sge list; 1015 struct ib_send_wr wr, *bad_wr; 1016 int ret = 0; 1017 1018 list.addr = iu->dma; 1019 list.length = len; 1020 list.lkey = target->srp_host->srp_dev->mr->lkey; 1021 1022 wr.next = NULL; 1023 wr.wr_id = target->tx_head & SRP_SQ_SIZE; 1024 wr.sg_list = &list; 1025 wr.num_sge = 1; 1026 wr.opcode = IB_WR_SEND; 1027 wr.send_flags = IB_SEND_SIGNALED; 1028 1029 ret = ib_post_send(target->qp, &wr, &bad_wr); 1030 1031 if (!ret) { 1032 ++target->tx_head; 1033 --target->req_lim; 1034 } 1035 1036 return ret; 1037 } 1038 1039 static int srp_queuecommand(struct scsi_cmnd *scmnd, 1040 void (*done)(struct scsi_cmnd *)) 1041 { 1042 struct srp_target_port *target = host_to_target(scmnd->device->host); 1043 struct srp_request *req; 1044 struct srp_iu *iu; 1045 struct srp_cmd *cmd; 1046 struct ib_device *dev; 1047 int len; 1048 1049 if (target->state == SRP_TARGET_CONNECTING) 1050 goto err; 1051 1052 if (target->state == SRP_TARGET_DEAD || 1053 target->state == SRP_TARGET_REMOVED) { 1054 scmnd->result = DID_BAD_TARGET << 16; 1055 done(scmnd); 1056 return 0; 1057 } 1058 1059 iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL); 1060 if (!iu) 1061 goto err; 1062 1063 dev = target->srp_host->srp_dev->dev; 1064 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, 1065 DMA_TO_DEVICE); 1066 1067 req = list_entry(target->free_reqs.next, struct srp_request, list); 1068 1069 scmnd->scsi_done = done; 1070 scmnd->result = 0; 1071 scmnd->host_scribble = (void *) (long) req->index; 1072 1073 cmd = iu->buf; 1074 memset(cmd, 0, sizeof *cmd); 1075 1076 cmd->opcode = SRP_CMD; 1077 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 1078 cmd->tag = req->index; 1079 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 1080 1081 req->scmnd = scmnd; 1082 req->cmd = iu; 1083 req->cmd_done = 0; 1084 req->tsk_mgmt = NULL; 1085 1086 len = srp_map_data(scmnd, target, req); 1087 if (len < 0) { 1088 shost_printk(KERN_ERR, target->scsi_host, 1089 PFX "Failed to map data\n"); 1090 goto err; 1091 } 1092 1093 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, 1094 DMA_TO_DEVICE); 1095 1096 if (__srp_post_send(target, iu, len)) { 1097 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 1098 goto err_unmap; 1099 } 1100 1101 list_move_tail(&req->list, &target->req_queue); 1102 1103 return 0; 1104 1105 err_unmap: 1106 srp_unmap_data(scmnd, target, req); 1107 1108 err: 1109 return SCSI_MLQUEUE_HOST_BUSY; 1110 } 1111 1112 static int srp_alloc_iu_bufs(struct srp_target_port *target) 1113 { 1114 int i; 1115 1116 for (i = 0; i < SRP_RQ_SIZE; ++i) { 1117 target->rx_ring[i] = srp_alloc_iu(target->srp_host, 1118 target->max_ti_iu_len, 1119 GFP_KERNEL, DMA_FROM_DEVICE); 1120 if (!target->rx_ring[i]) 1121 goto err; 1122 } 1123 1124 for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { 1125 target->tx_ring[i] = srp_alloc_iu(target->srp_host, 1126 srp_max_iu_len, 1127 GFP_KERNEL, DMA_TO_DEVICE); 1128 if (!target->tx_ring[i]) 1129 goto err; 1130 } 1131 1132 return 0; 1133 1134 err: 1135 for (i = 0; i < SRP_RQ_SIZE; ++i) { 1136 srp_free_iu(target->srp_host, target->rx_ring[i]); 1137 target->rx_ring[i] = NULL; 1138 } 1139 1140 for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { 1141 srp_free_iu(target->srp_host, target->tx_ring[i]); 1142 target->tx_ring[i] = NULL; 1143 } 1144 1145 return -ENOMEM; 1146 } 1147 1148 static void srp_cm_rej_handler(struct ib_cm_id *cm_id, 1149 struct ib_cm_event *event, 1150 struct srp_target_port *target) 1151 { 1152 struct Scsi_Host *shost = target->scsi_host; 1153 struct ib_class_port_info *cpi; 1154 int opcode; 1155 1156 switch (event->param.rej_rcvd.reason) { 1157 case IB_CM_REJ_PORT_CM_REDIRECT: 1158 cpi = event->param.rej_rcvd.ari; 1159 target->path.dlid = cpi->redirect_lid; 1160 target->path.pkey = cpi->redirect_pkey; 1161 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 1162 memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); 1163 1164 target->status = target->path.dlid ? 1165 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 1166 break; 1167 1168 case IB_CM_REJ_PORT_REDIRECT: 1169 if (srp_target_is_topspin(target)) { 1170 /* 1171 * Topspin/Cisco SRP gateways incorrectly send 1172 * reject reason code 25 when they mean 24 1173 * (port redirect). 1174 */ 1175 memcpy(target->path.dgid.raw, 1176 event->param.rej_rcvd.ari, 16); 1177 1178 shost_printk(KERN_DEBUG, shost, 1179 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 1180 (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), 1181 (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); 1182 1183 target->status = SRP_PORT_REDIRECT; 1184 } else { 1185 shost_printk(KERN_WARNING, shost, 1186 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 1187 target->status = -ECONNRESET; 1188 } 1189 break; 1190 1191 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 1192 shost_printk(KERN_WARNING, shost, 1193 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 1194 target->status = -ECONNRESET; 1195 break; 1196 1197 case IB_CM_REJ_CONSUMER_DEFINED: 1198 opcode = *(u8 *) event->private_data; 1199 if (opcode == SRP_LOGIN_REJ) { 1200 struct srp_login_rej *rej = event->private_data; 1201 u32 reason = be32_to_cpu(rej->reason); 1202 1203 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 1204 shost_printk(KERN_WARNING, shost, 1205 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 1206 else 1207 shost_printk(KERN_WARNING, shost, 1208 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); 1209 } else 1210 shost_printk(KERN_WARNING, shost, 1211 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 1212 " opcode 0x%02x\n", opcode); 1213 target->status = -ECONNRESET; 1214 break; 1215 1216 case IB_CM_REJ_STALE_CONN: 1217 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 1218 target->status = SRP_STALE_CONN; 1219 break; 1220 1221 default: 1222 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 1223 event->param.rej_rcvd.reason); 1224 target->status = -ECONNRESET; 1225 } 1226 } 1227 1228 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 1229 { 1230 struct srp_target_port *target = cm_id->context; 1231 struct ib_qp_attr *qp_attr = NULL; 1232 int attr_mask = 0; 1233 int comp = 0; 1234 int opcode = 0; 1235 int i; 1236 1237 switch (event->event) { 1238 case IB_CM_REQ_ERROR: 1239 shost_printk(KERN_DEBUG, target->scsi_host, 1240 PFX "Sending CM REQ failed\n"); 1241 comp = 1; 1242 target->status = -ECONNRESET; 1243 break; 1244 1245 case IB_CM_REP_RECEIVED: 1246 comp = 1; 1247 opcode = *(u8 *) event->private_data; 1248 1249 if (opcode == SRP_LOGIN_RSP) { 1250 struct srp_login_rsp *rsp = event->private_data; 1251 1252 target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); 1253 target->req_lim = be32_to_cpu(rsp->req_lim_delta); 1254 1255 target->scsi_host->can_queue = min(target->req_lim, 1256 target->scsi_host->can_queue); 1257 } else { 1258 shost_printk(KERN_WARNING, target->scsi_host, 1259 PFX "Unhandled RSP opcode %#x\n", opcode); 1260 target->status = -ECONNRESET; 1261 break; 1262 } 1263 1264 if (!target->rx_ring[0]) { 1265 target->status = srp_alloc_iu_bufs(target); 1266 if (target->status) 1267 break; 1268 } 1269 1270 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); 1271 if (!qp_attr) { 1272 target->status = -ENOMEM; 1273 break; 1274 } 1275 1276 qp_attr->qp_state = IB_QPS_RTR; 1277 target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 1278 if (target->status) 1279 break; 1280 1281 target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); 1282 if (target->status) 1283 break; 1284 1285 for (i = 0; i < SRP_RQ_SIZE; i++) { 1286 target->status = srp_post_recv(target); 1287 if (target->status) 1288 break; 1289 } 1290 if (target->status) 1291 break; 1292 1293 qp_attr->qp_state = IB_QPS_RTS; 1294 target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 1295 if (target->status) 1296 break; 1297 1298 target->status = ib_modify_qp(target->qp, qp_attr, attr_mask); 1299 if (target->status) 1300 break; 1301 1302 target->status = ib_send_cm_rtu(cm_id, NULL, 0); 1303 if (target->status) 1304 break; 1305 1306 break; 1307 1308 case IB_CM_REJ_RECEIVED: 1309 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 1310 comp = 1; 1311 1312 srp_cm_rej_handler(cm_id, event, target); 1313 break; 1314 1315 case IB_CM_DREQ_RECEIVED: 1316 shost_printk(KERN_WARNING, target->scsi_host, 1317 PFX "DREQ received - connection closed\n"); 1318 if (ib_send_cm_drep(cm_id, NULL, 0)) 1319 shost_printk(KERN_ERR, target->scsi_host, 1320 PFX "Sending CM DREP failed\n"); 1321 break; 1322 1323 case IB_CM_TIMEWAIT_EXIT: 1324 shost_printk(KERN_ERR, target->scsi_host, 1325 PFX "connection closed\n"); 1326 1327 comp = 1; 1328 target->status = 0; 1329 break; 1330 1331 case IB_CM_MRA_RECEIVED: 1332 case IB_CM_DREQ_ERROR: 1333 case IB_CM_DREP_RECEIVED: 1334 break; 1335 1336 default: 1337 shost_printk(KERN_WARNING, target->scsi_host, 1338 PFX "Unhandled CM event %d\n", event->event); 1339 break; 1340 } 1341 1342 if (comp) 1343 complete(&target->done); 1344 1345 kfree(qp_attr); 1346 1347 return 0; 1348 } 1349 1350 static int srp_send_tsk_mgmt(struct srp_target_port *target, 1351 struct srp_request *req, u8 func) 1352 { 1353 struct srp_iu *iu; 1354 struct srp_tsk_mgmt *tsk_mgmt; 1355 1356 spin_lock_irq(target->scsi_host->host_lock); 1357 1358 if (target->state == SRP_TARGET_DEAD || 1359 target->state == SRP_TARGET_REMOVED) { 1360 req->scmnd->result = DID_BAD_TARGET << 16; 1361 goto out; 1362 } 1363 1364 init_completion(&req->done); 1365 1366 iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT); 1367 if (!iu) 1368 goto out; 1369 1370 tsk_mgmt = iu->buf; 1371 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1372 1373 tsk_mgmt->opcode = SRP_TSK_MGMT; 1374 tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); 1375 tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; 1376 tsk_mgmt->tsk_mgmt_func = func; 1377 tsk_mgmt->task_tag = req->index; 1378 1379 if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) 1380 goto out; 1381 1382 req->tsk_mgmt = iu; 1383 1384 spin_unlock_irq(target->scsi_host->host_lock); 1385 1386 if (!wait_for_completion_timeout(&req->done, 1387 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 1388 return -1; 1389 1390 return 0; 1391 1392 out: 1393 spin_unlock_irq(target->scsi_host->host_lock); 1394 return -1; 1395 } 1396 1397 static int srp_find_req(struct srp_target_port *target, 1398 struct scsi_cmnd *scmnd, 1399 struct srp_request **req) 1400 { 1401 if (scmnd->host_scribble == (void *) -1L) 1402 return -1; 1403 1404 *req = &target->req_ring[(long) scmnd->host_scribble]; 1405 1406 return 0; 1407 } 1408 1409 static int srp_abort(struct scsi_cmnd *scmnd) 1410 { 1411 struct srp_target_port *target = host_to_target(scmnd->device->host); 1412 struct srp_request *req; 1413 int ret = SUCCESS; 1414 1415 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 1416 1417 if (target->qp_in_error) 1418 return FAILED; 1419 if (srp_find_req(target, scmnd, &req)) 1420 return FAILED; 1421 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) 1422 return FAILED; 1423 1424 spin_lock_irq(target->scsi_host->host_lock); 1425 1426 if (req->cmd_done) { 1427 srp_remove_req(target, req); 1428 scmnd->scsi_done(scmnd); 1429 } else if (!req->tsk_status) { 1430 srp_remove_req(target, req); 1431 scmnd->result = DID_ABORT << 16; 1432 } else 1433 ret = FAILED; 1434 1435 spin_unlock_irq(target->scsi_host->host_lock); 1436 1437 return ret; 1438 } 1439 1440 static int srp_reset_device(struct scsi_cmnd *scmnd) 1441 { 1442 struct srp_target_port *target = host_to_target(scmnd->device->host); 1443 struct srp_request *req, *tmp; 1444 1445 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 1446 1447 if (target->qp_in_error) 1448 return FAILED; 1449 if (srp_find_req(target, scmnd, &req)) 1450 return FAILED; 1451 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1452 return FAILED; 1453 if (req->tsk_status) 1454 return FAILED; 1455 1456 spin_lock_irq(target->scsi_host->host_lock); 1457 1458 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 1459 if (req->scmnd->device == scmnd->device) 1460 srp_reset_req(target, req); 1461 1462 spin_unlock_irq(target->scsi_host->host_lock); 1463 1464 return SUCCESS; 1465 } 1466 1467 static int srp_reset_host(struct scsi_cmnd *scmnd) 1468 { 1469 struct srp_target_port *target = host_to_target(scmnd->device->host); 1470 int ret = FAILED; 1471 1472 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 1473 1474 if (!srp_reconnect_target(target)) 1475 ret = SUCCESS; 1476 1477 return ret; 1478 } 1479 1480 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, 1481 char *buf) 1482 { 1483 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1484 1485 if (target->state == SRP_TARGET_DEAD || 1486 target->state == SRP_TARGET_REMOVED) 1487 return -ENODEV; 1488 1489 return sprintf(buf, "0x%016llx\n", 1490 (unsigned long long) be64_to_cpu(target->id_ext)); 1491 } 1492 1493 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, 1494 char *buf) 1495 { 1496 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1497 1498 if (target->state == SRP_TARGET_DEAD || 1499 target->state == SRP_TARGET_REMOVED) 1500 return -ENODEV; 1501 1502 return sprintf(buf, "0x%016llx\n", 1503 (unsigned long long) be64_to_cpu(target->ioc_guid)); 1504 } 1505 1506 static ssize_t show_service_id(struct device *dev, 1507 struct device_attribute *attr, char *buf) 1508 { 1509 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1510 1511 if (target->state == SRP_TARGET_DEAD || 1512 target->state == SRP_TARGET_REMOVED) 1513 return -ENODEV; 1514 1515 return sprintf(buf, "0x%016llx\n", 1516 (unsigned long long) be64_to_cpu(target->service_id)); 1517 } 1518 1519 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, 1520 char *buf) 1521 { 1522 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1523 1524 if (target->state == SRP_TARGET_DEAD || 1525 target->state == SRP_TARGET_REMOVED) 1526 return -ENODEV; 1527 1528 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); 1529 } 1530 1531 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, 1532 char *buf) 1533 { 1534 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1535 1536 if (target->state == SRP_TARGET_DEAD || 1537 target->state == SRP_TARGET_REMOVED) 1538 return -ENODEV; 1539 1540 return sprintf(buf, "%pI6\n", target->path.dgid.raw); 1541 } 1542 1543 static ssize_t show_orig_dgid(struct device *dev, 1544 struct device_attribute *attr, char *buf) 1545 { 1546 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1547 1548 if (target->state == SRP_TARGET_DEAD || 1549 target->state == SRP_TARGET_REMOVED) 1550 return -ENODEV; 1551 1552 return sprintf(buf, "%pI6\n", target->orig_dgid); 1553 } 1554 1555 static ssize_t show_req_lim(struct device *dev, 1556 struct device_attribute *attr, char *buf) 1557 { 1558 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1559 1560 if (target->state == SRP_TARGET_DEAD || 1561 target->state == SRP_TARGET_REMOVED) 1562 return -ENODEV; 1563 1564 return sprintf(buf, "%d\n", target->req_lim); 1565 } 1566 1567 static ssize_t show_zero_req_lim(struct device *dev, 1568 struct device_attribute *attr, char *buf) 1569 { 1570 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1571 1572 if (target->state == SRP_TARGET_DEAD || 1573 target->state == SRP_TARGET_REMOVED) 1574 return -ENODEV; 1575 1576 return sprintf(buf, "%d\n", target->zero_req_lim); 1577 } 1578 1579 static ssize_t show_local_ib_port(struct device *dev, 1580 struct device_attribute *attr, char *buf) 1581 { 1582 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1583 1584 return sprintf(buf, "%d\n", target->srp_host->port); 1585 } 1586 1587 static ssize_t show_local_ib_device(struct device *dev, 1588 struct device_attribute *attr, char *buf) 1589 { 1590 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 1591 1592 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); 1593 } 1594 1595 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 1596 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 1597 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 1598 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1599 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 1600 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); 1601 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); 1602 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 1603 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); 1604 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); 1605 1606 static struct device_attribute *srp_host_attrs[] = { 1607 &dev_attr_id_ext, 1608 &dev_attr_ioc_guid, 1609 &dev_attr_service_id, 1610 &dev_attr_pkey, 1611 &dev_attr_dgid, 1612 &dev_attr_orig_dgid, 1613 &dev_attr_req_lim, 1614 &dev_attr_zero_req_lim, 1615 &dev_attr_local_ib_port, 1616 &dev_attr_local_ib_device, 1617 NULL 1618 }; 1619 1620 static struct scsi_host_template srp_template = { 1621 .module = THIS_MODULE, 1622 .name = "InfiniBand SRP initiator", 1623 .proc_name = DRV_NAME, 1624 .info = srp_target_info, 1625 .queuecommand = srp_queuecommand, 1626 .eh_abort_handler = srp_abort, 1627 .eh_device_reset_handler = srp_reset_device, 1628 .eh_host_reset_handler = srp_reset_host, 1629 .can_queue = SRP_SQ_SIZE, 1630 .this_id = -1, 1631 .cmd_per_lun = SRP_SQ_SIZE, 1632 .use_clustering = ENABLE_CLUSTERING, 1633 .shost_attrs = srp_host_attrs 1634 }; 1635 1636 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 1637 { 1638 struct srp_rport_identifiers ids; 1639 struct srp_rport *rport; 1640 1641 sprintf(target->target_name, "SRP.T10:%016llX", 1642 (unsigned long long) be64_to_cpu(target->id_ext)); 1643 1644 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 1645 return -ENODEV; 1646 1647 memcpy(ids.port_id, &target->id_ext, 8); 1648 memcpy(ids.port_id + 8, &target->ioc_guid, 8); 1649 ids.roles = SRP_RPORT_ROLE_TARGET; 1650 rport = srp_rport_add(target->scsi_host, &ids); 1651 if (IS_ERR(rport)) { 1652 scsi_remove_host(target->scsi_host); 1653 return PTR_ERR(rport); 1654 } 1655 1656 spin_lock(&host->target_lock); 1657 list_add_tail(&target->list, &host->target_list); 1658 spin_unlock(&host->target_lock); 1659 1660 target->state = SRP_TARGET_LIVE; 1661 1662 scsi_scan_target(&target->scsi_host->shost_gendev, 1663 0, target->scsi_id, SCAN_WILD_CARD, 0); 1664 1665 return 0; 1666 } 1667 1668 static void srp_release_dev(struct device *dev) 1669 { 1670 struct srp_host *host = 1671 container_of(dev, struct srp_host, dev); 1672 1673 complete(&host->released); 1674 } 1675 1676 static struct class srp_class = { 1677 .name = "infiniband_srp", 1678 .dev_release = srp_release_dev 1679 }; 1680 1681 /* 1682 * Target ports are added by writing 1683 * 1684 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 1685 * pkey=<P_Key>,service_id=<service ID> 1686 * 1687 * to the add_target sysfs attribute. 1688 */ 1689 enum { 1690 SRP_OPT_ERR = 0, 1691 SRP_OPT_ID_EXT = 1 << 0, 1692 SRP_OPT_IOC_GUID = 1 << 1, 1693 SRP_OPT_DGID = 1 << 2, 1694 SRP_OPT_PKEY = 1 << 3, 1695 SRP_OPT_SERVICE_ID = 1 << 4, 1696 SRP_OPT_MAX_SECT = 1 << 5, 1697 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 1698 SRP_OPT_IO_CLASS = 1 << 7, 1699 SRP_OPT_INITIATOR_EXT = 1 << 8, 1700 SRP_OPT_ALL = (SRP_OPT_ID_EXT | 1701 SRP_OPT_IOC_GUID | 1702 SRP_OPT_DGID | 1703 SRP_OPT_PKEY | 1704 SRP_OPT_SERVICE_ID), 1705 }; 1706 1707 static const match_table_t srp_opt_tokens = { 1708 { SRP_OPT_ID_EXT, "id_ext=%s" }, 1709 { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 1710 { SRP_OPT_DGID, "dgid=%s" }, 1711 { SRP_OPT_PKEY, "pkey=%x" }, 1712 { SRP_OPT_SERVICE_ID, "service_id=%s" }, 1713 { SRP_OPT_MAX_SECT, "max_sect=%d" }, 1714 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 1715 { SRP_OPT_IO_CLASS, "io_class=%x" }, 1716 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 1717 { SRP_OPT_ERR, NULL } 1718 }; 1719 1720 static int srp_parse_options(const char *buf, struct srp_target_port *target) 1721 { 1722 char *options, *sep_opt; 1723 char *p; 1724 char dgid[3]; 1725 substring_t args[MAX_OPT_ARGS]; 1726 int opt_mask = 0; 1727 int token; 1728 int ret = -EINVAL; 1729 int i; 1730 1731 options = kstrdup(buf, GFP_KERNEL); 1732 if (!options) 1733 return -ENOMEM; 1734 1735 sep_opt = options; 1736 while ((p = strsep(&sep_opt, ",")) != NULL) { 1737 if (!*p) 1738 continue; 1739 1740 token = match_token(p, srp_opt_tokens, args); 1741 opt_mask |= token; 1742 1743 switch (token) { 1744 case SRP_OPT_ID_EXT: 1745 p = match_strdup(args); 1746 if (!p) { 1747 ret = -ENOMEM; 1748 goto out; 1749 } 1750 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1751 kfree(p); 1752 break; 1753 1754 case SRP_OPT_IOC_GUID: 1755 p = match_strdup(args); 1756 if (!p) { 1757 ret = -ENOMEM; 1758 goto out; 1759 } 1760 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1761 kfree(p); 1762 break; 1763 1764 case SRP_OPT_DGID: 1765 p = match_strdup(args); 1766 if (!p) { 1767 ret = -ENOMEM; 1768 goto out; 1769 } 1770 if (strlen(p) != 32) { 1771 printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); 1772 kfree(p); 1773 goto out; 1774 } 1775 1776 for (i = 0; i < 16; ++i) { 1777 strlcpy(dgid, p + i * 2, 3); 1778 target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); 1779 } 1780 kfree(p); 1781 memcpy(target->orig_dgid, target->path.dgid.raw, 16); 1782 break; 1783 1784 case SRP_OPT_PKEY: 1785 if (match_hex(args, &token)) { 1786 printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p); 1787 goto out; 1788 } 1789 target->path.pkey = cpu_to_be16(token); 1790 break; 1791 1792 case SRP_OPT_SERVICE_ID: 1793 p = match_strdup(args); 1794 if (!p) { 1795 ret = -ENOMEM; 1796 goto out; 1797 } 1798 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1799 target->path.service_id = target->service_id; 1800 kfree(p); 1801 break; 1802 1803 case SRP_OPT_MAX_SECT: 1804 if (match_int(args, &token)) { 1805 printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p); 1806 goto out; 1807 } 1808 target->scsi_host->max_sectors = token; 1809 break; 1810 1811 case SRP_OPT_MAX_CMD_PER_LUN: 1812 if (match_int(args, &token)) { 1813 printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); 1814 goto out; 1815 } 1816 target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE); 1817 break; 1818 1819 case SRP_OPT_IO_CLASS: 1820 if (match_hex(args, &token)) { 1821 printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p); 1822 goto out; 1823 } 1824 if (token != SRP_REV10_IB_IO_CLASS && 1825 token != SRP_REV16A_IB_IO_CLASS) { 1826 printk(KERN_WARNING PFX "unknown IO class parameter value" 1827 " %x specified (use %x or %x).\n", 1828 token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS); 1829 goto out; 1830 } 1831 target->io_class = token; 1832 break; 1833 1834 case SRP_OPT_INITIATOR_EXT: 1835 p = match_strdup(args); 1836 if (!p) { 1837 ret = -ENOMEM; 1838 goto out; 1839 } 1840 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); 1841 kfree(p); 1842 break; 1843 1844 default: 1845 printk(KERN_WARNING PFX "unknown parameter or missing value " 1846 "'%s' in target creation request\n", p); 1847 goto out; 1848 } 1849 } 1850 1851 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) 1852 ret = 0; 1853 else 1854 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) 1855 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && 1856 !(srp_opt_tokens[i].token & opt_mask)) 1857 printk(KERN_WARNING PFX "target creation request is " 1858 "missing parameter '%s'\n", 1859 srp_opt_tokens[i].pattern); 1860 1861 out: 1862 kfree(options); 1863 return ret; 1864 } 1865 1866 static ssize_t srp_create_target(struct device *dev, 1867 struct device_attribute *attr, 1868 const char *buf, size_t count) 1869 { 1870 struct srp_host *host = 1871 container_of(dev, struct srp_host, dev); 1872 struct Scsi_Host *target_host; 1873 struct srp_target_port *target; 1874 int ret; 1875 int i; 1876 1877 target_host = scsi_host_alloc(&srp_template, 1878 sizeof (struct srp_target_port)); 1879 if (!target_host) 1880 return -ENOMEM; 1881 1882 target_host->transportt = ib_srp_transport_template; 1883 target_host->max_lun = SRP_MAX_LUN; 1884 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 1885 1886 target = host_to_target(target_host); 1887 1888 target->io_class = SRP_REV16A_IB_IO_CLASS; 1889 target->scsi_host = target_host; 1890 target->srp_host = host; 1891 1892 INIT_LIST_HEAD(&target->free_reqs); 1893 INIT_LIST_HEAD(&target->req_queue); 1894 for (i = 0; i < SRP_SQ_SIZE; ++i) { 1895 target->req_ring[i].index = i; 1896 list_add_tail(&target->req_ring[i].list, &target->free_reqs); 1897 } 1898 1899 ret = srp_parse_options(buf, target); 1900 if (ret) 1901 goto err; 1902 1903 ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid); 1904 1905 shost_printk(KERN_DEBUG, target->scsi_host, PFX 1906 "new target: id_ext %016llx ioc_guid %016llx pkey %04x " 1907 "service_id %016llx dgid %pI6\n", 1908 (unsigned long long) be64_to_cpu(target->id_ext), 1909 (unsigned long long) be64_to_cpu(target->ioc_guid), 1910 be16_to_cpu(target->path.pkey), 1911 (unsigned long long) be64_to_cpu(target->service_id), 1912 target->path.dgid.raw); 1913 1914 ret = srp_create_target_ib(target); 1915 if (ret) 1916 goto err; 1917 1918 ret = srp_new_cm_id(target); 1919 if (ret) 1920 goto err_free; 1921 1922 target->qp_in_error = 0; 1923 ret = srp_connect_target(target); 1924 if (ret) { 1925 shost_printk(KERN_ERR, target->scsi_host, 1926 PFX "Connection failed\n"); 1927 goto err_cm_id; 1928 } 1929 1930 ret = srp_add_target(host, target); 1931 if (ret) 1932 goto err_disconnect; 1933 1934 return count; 1935 1936 err_disconnect: 1937 srp_disconnect_target(target); 1938 1939 err_cm_id: 1940 ib_destroy_cm_id(target->cm_id); 1941 1942 err_free: 1943 srp_free_target_ib(target); 1944 1945 err: 1946 scsi_host_put(target_host); 1947 1948 return ret; 1949 } 1950 1951 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); 1952 1953 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 1954 char *buf) 1955 { 1956 struct srp_host *host = container_of(dev, struct srp_host, dev); 1957 1958 return sprintf(buf, "%s\n", host->srp_dev->dev->name); 1959 } 1960 1961 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1962 1963 static ssize_t show_port(struct device *dev, struct device_attribute *attr, 1964 char *buf) 1965 { 1966 struct srp_host *host = container_of(dev, struct srp_host, dev); 1967 1968 return sprintf(buf, "%d\n", host->port); 1969 } 1970 1971 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); 1972 1973 static struct srp_host *srp_add_port(struct srp_device *device, u8 port) 1974 { 1975 struct srp_host *host; 1976 1977 host = kzalloc(sizeof *host, GFP_KERNEL); 1978 if (!host) 1979 return NULL; 1980 1981 INIT_LIST_HEAD(&host->target_list); 1982 spin_lock_init(&host->target_lock); 1983 init_completion(&host->released); 1984 host->srp_dev = device; 1985 host->port = port; 1986 1987 host->dev.class = &srp_class; 1988 host->dev.parent = device->dev->dma_device; 1989 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); 1990 1991 if (device_register(&host->dev)) 1992 goto free_host; 1993 if (device_create_file(&host->dev, &dev_attr_add_target)) 1994 goto err_class; 1995 if (device_create_file(&host->dev, &dev_attr_ibdev)) 1996 goto err_class; 1997 if (device_create_file(&host->dev, &dev_attr_port)) 1998 goto err_class; 1999 2000 return host; 2001 2002 err_class: 2003 device_unregister(&host->dev); 2004 2005 free_host: 2006 kfree(host); 2007 2008 return NULL; 2009 } 2010 2011 static void srp_add_one(struct ib_device *device) 2012 { 2013 struct srp_device *srp_dev; 2014 struct ib_device_attr *dev_attr; 2015 struct ib_fmr_pool_param fmr_param; 2016 struct srp_host *host; 2017 int s, e, p; 2018 2019 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 2020 if (!dev_attr) 2021 return; 2022 2023 if (ib_query_device(device, dev_attr)) { 2024 printk(KERN_WARNING PFX "Query device failed for %s\n", 2025 device->name); 2026 goto free_attr; 2027 } 2028 2029 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); 2030 if (!srp_dev) 2031 goto free_attr; 2032 2033 /* 2034 * Use the smallest page size supported by the HCA, down to a 2035 * minimum of 512 bytes (which is the smallest sector that a 2036 * SCSI command will ever carry). 2037 */ 2038 srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1); 2039 srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift; 2040 srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); 2041 2042 INIT_LIST_HEAD(&srp_dev->dev_list); 2043 2044 srp_dev->dev = device; 2045 srp_dev->pd = ib_alloc_pd(device); 2046 if (IS_ERR(srp_dev->pd)) 2047 goto free_dev; 2048 2049 srp_dev->mr = ib_get_dma_mr(srp_dev->pd, 2050 IB_ACCESS_LOCAL_WRITE | 2051 IB_ACCESS_REMOTE_READ | 2052 IB_ACCESS_REMOTE_WRITE); 2053 if (IS_ERR(srp_dev->mr)) 2054 goto err_pd; 2055 2056 memset(&fmr_param, 0, sizeof fmr_param); 2057 fmr_param.pool_size = SRP_FMR_POOL_SIZE; 2058 fmr_param.dirty_watermark = SRP_FMR_DIRTY_SIZE; 2059 fmr_param.cache = 1; 2060 fmr_param.max_pages_per_fmr = SRP_FMR_SIZE; 2061 fmr_param.page_shift = srp_dev->fmr_page_shift; 2062 fmr_param.access = (IB_ACCESS_LOCAL_WRITE | 2063 IB_ACCESS_REMOTE_WRITE | 2064 IB_ACCESS_REMOTE_READ); 2065 2066 srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param); 2067 if (IS_ERR(srp_dev->fmr_pool)) 2068 srp_dev->fmr_pool = NULL; 2069 2070 if (device->node_type == RDMA_NODE_IB_SWITCH) { 2071 s = 0; 2072 e = 0; 2073 } else { 2074 s = 1; 2075 e = device->phys_port_cnt; 2076 } 2077 2078 for (p = s; p <= e; ++p) { 2079 host = srp_add_port(srp_dev, p); 2080 if (host) 2081 list_add_tail(&host->list, &srp_dev->dev_list); 2082 } 2083 2084 ib_set_client_data(device, &srp_client, srp_dev); 2085 2086 goto free_attr; 2087 2088 err_pd: 2089 ib_dealloc_pd(srp_dev->pd); 2090 2091 free_dev: 2092 kfree(srp_dev); 2093 2094 free_attr: 2095 kfree(dev_attr); 2096 } 2097 2098 static void srp_remove_one(struct ib_device *device) 2099 { 2100 struct srp_device *srp_dev; 2101 struct srp_host *host, *tmp_host; 2102 LIST_HEAD(target_list); 2103 struct srp_target_port *target, *tmp_target; 2104 2105 srp_dev = ib_get_client_data(device, &srp_client); 2106 2107 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { 2108 device_unregister(&host->dev); 2109 /* 2110 * Wait for the sysfs entry to go away, so that no new 2111 * target ports can be created. 2112 */ 2113 wait_for_completion(&host->released); 2114 2115 /* 2116 * Mark all target ports as removed, so we stop queueing 2117 * commands and don't try to reconnect. 2118 */ 2119 spin_lock(&host->target_lock); 2120 list_for_each_entry(target, &host->target_list, list) { 2121 spin_lock_irq(target->scsi_host->host_lock); 2122 target->state = SRP_TARGET_REMOVED; 2123 spin_unlock_irq(target->scsi_host->host_lock); 2124 } 2125 spin_unlock(&host->target_lock); 2126 2127 /* 2128 * Wait for any reconnection tasks that may have 2129 * started before we marked our target ports as 2130 * removed, and any target port removal tasks. 2131 */ 2132 flush_scheduled_work(); 2133 2134 list_for_each_entry_safe(target, tmp_target, 2135 &host->target_list, list) { 2136 srp_remove_host(target->scsi_host); 2137 scsi_remove_host(target->scsi_host); 2138 srp_disconnect_target(target); 2139 ib_destroy_cm_id(target->cm_id); 2140 srp_free_target_ib(target); 2141 scsi_host_put(target->scsi_host); 2142 } 2143 2144 kfree(host); 2145 } 2146 2147 if (srp_dev->fmr_pool) 2148 ib_destroy_fmr_pool(srp_dev->fmr_pool); 2149 ib_dereg_mr(srp_dev->mr); 2150 ib_dealloc_pd(srp_dev->pd); 2151 2152 kfree(srp_dev); 2153 } 2154 2155 static struct srp_function_template ib_srp_transport_functions = { 2156 }; 2157 2158 static int __init srp_init_module(void) 2159 { 2160 int ret; 2161 2162 if (srp_sg_tablesize > 255) { 2163 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); 2164 srp_sg_tablesize = 255; 2165 } 2166 2167 ib_srp_transport_template = 2168 srp_attach_transport(&ib_srp_transport_functions); 2169 if (!ib_srp_transport_template) 2170 return -ENOMEM; 2171 2172 srp_template.sg_tablesize = srp_sg_tablesize; 2173 srp_max_iu_len = (sizeof (struct srp_cmd) + 2174 sizeof (struct srp_indirect_buf) + 2175 srp_sg_tablesize * 16); 2176 2177 ret = class_register(&srp_class); 2178 if (ret) { 2179 printk(KERN_ERR PFX "couldn't register class infiniband_srp\n"); 2180 srp_release_transport(ib_srp_transport_template); 2181 return ret; 2182 } 2183 2184 ib_sa_register_client(&srp_sa_client); 2185 2186 ret = ib_register_client(&srp_client); 2187 if (ret) { 2188 printk(KERN_ERR PFX "couldn't register IB client\n"); 2189 srp_release_transport(ib_srp_transport_template); 2190 ib_sa_unregister_client(&srp_sa_client); 2191 class_unregister(&srp_class); 2192 return ret; 2193 } 2194 2195 return 0; 2196 } 2197 2198 static void __exit srp_cleanup_module(void) 2199 { 2200 ib_unregister_client(&srp_client); 2201 ib_sa_unregister_client(&srp_sa_client); 2202 class_unregister(&srp_class); 2203 srp_release_transport(ib_srp_transport_template); 2204 } 2205 2206 module_init(srp_init_module); 2207 module_exit(srp_cleanup_module); 2208