/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

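/*
 * An SRP information unit (IU) is a message buffer used for sends and
 * receives on the RC queue pair.  srp_alloc_iu() allocates the buffer
 * and DMA-maps it once up front, so posting work requests later only
 * needs the cached DMA address; srp_free_iu() undoes both steps.
 */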
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

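/*
 * Resolve the path to the target port by querying the subnet
 * administrator for a path record.  The query itself is asynchronous;
 * srp_path_rec_completion() above fills in target->path (or records
 * the error) and signals target->done, which we wait on here.
 */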
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	    |
						   IB_SA_PATH_REC_SGID	    |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

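/*
 * Build and send the InfiniBand CM REQ that carries the SRP_LOGIN_REQ
 * as private data.  The CM parameters (responder resources, timeouts,
 * retry counts) are fixed defaults chosen by the driver; the SRP login
 * request advertises our maximum IU length and the buffer formats
 * (direct and indirect data descriptors) that we can handle.
 */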
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ib_send_cm_dreq(target->cm_id, NULL, 0);
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		scsi_host_put(target->scsi_host);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	mutex_lock(&target->srp_host->target_mutex);
	list_del(&target->list);
	mutex_unlock(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
	/* And another put to really free the target port... */
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

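/*
 * Try to bring a dead connection back: disconnect, swap in a fresh CM
 * ID so the target does not confuse the new login with the old one,
 * reset and re-initialize the QP, fail any outstanding commands with
 * DID_RESET, reset the IU ring indices, and finally log in again via
 * srp_connect_target().
 */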
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
	}

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;
	target->req_head = 0;
	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

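/*
 * Map the data buffer of a SCSI command and fill in the SRP data
 * descriptor that follows the fixed part of the SRP_CMD IU.  A single
 * DMA segment is described with one direct descriptor; multiple
 * segments get an indirect descriptor table built directly in the IU.
 * Returns the total length of the request IU, or a negative error.
 */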
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	int i;
	u8 fmt;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
			   scmnd->sc_data_direction);

	if (count == 1) {
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		fmt = SRP_DATA_DESC_DIRECT;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_direct_buf);
	} else {
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;

		fmt = SRP_DATA_DESC_INDIRECT;

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va = cpu_to_be64(req->cmd->dma +
						 sizeof *cmd +
						 sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->mr->rkey);
			buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));

			datalen += sg_dma_len(&scat[i]);
		}

		buf->len = cpu_to_be32(datalen);

		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
			   int index)
{
	list_del(&req->list);
	req->next = target->req_head;
	target->req_head = index;
}

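/*
 * Handle an SRP_RSP IU from the target.  The req_lim_delta field
 * updates our send credit count, the low bits of the tag identify the
 * slot in req_ring, and SRP_TAG_TSK_MGMT distinguishes task management
 * responses (which just complete &req->done) from normal command
 * completions, which copy back sense data and residual counts before
 * calling scsi_done().
 */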
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR " [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

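/*
 * Completion handler for the single CQ shared by the send and receive
 * queues.  Receive completions are marked with SRP_OP_RECV in the work
 * request ID and are dispatched to srp_handle_recv(); send completions
 * only advance tx_tail.  Any completion error while the target is live
 * schedules the reconnect work.
 */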
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next    = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

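/*
 * queuecommand entry point: grab a free tx IU and request slot, build
 * the SRP_CMD IU, map the data buffer, post a receive for the eventual
 * response and then post the send.  The bare __srp_get_tx_iu() and
 * __srp_post_send() calls below rely on the SCSI midlayer of this
 * kernel generation invoking queuecommand with the host lock already
 * held.  Any failure returns SCSI_MLQUEUE_HOST_BUSY so the midlayer
 * retries the command later.
 */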
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done = done;
	scmnd->result = 0;
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd = scmnd;
	req->cmd = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

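/*
 * Decode a connection reject (REJ) from the target side.  Redirect
 * rejects update the path or remote CM QPN and set SRP_PORT_REDIRECT
 * or SRP_DLID_REDIRECT so that srp_connect_target() can retry the
 * login; anything else ends up as -ECONNRESET.
 */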
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING " REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

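/*
 * InfiniBand CM callback for the target connection.  The interesting
 * event is IB_CM_REP_RECEIVED: the SRP_LOGIN_RSP in the private data
 * gives us the target's maximum IU length and initial request limit,
 * after which we allocate the IU rings, move the QP through RTR and
 * RTS, post the first receive and send the RTU.  Events that finish a
 * connection attempt set "comp" so that whoever is waiting on
 * target->done is woken up.
 */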
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

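/*
 * Send an SRP_TSK_MGMT IU (abort task or LUN reset) for the command
 * identified by scmnd->host_scribble, then wait up to
 * SRP_ABORT_TIMEOUT_MS for srp_process_rsp() to complete &req->done.
 * Returns SUCCESS or FAILED as expected by the SCSI error handlers.
 */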
static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int req_index;
	int ret = FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	if (scmnd->host_scribble == (void *) -1L)
		goto out;

	req_index = (long) scmnd->host_scribble;
	printk(KERN_ERR "Abort for req_index %d\n", req_index);

	req = &target->req_ring[req_index];
	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);
	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return FAILED;
	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req, req_index);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req, req_index);
		scmnd->result = DID_ABORT << 16;
		ret = SUCCESS;
	}

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return ret;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP abort called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP reset_device called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static CLASS_DEVICE_ATTR(id_ext,     S_IRUGO, show_id_ext,     NULL);
static CLASS_DEVICE_ATTR(ioc_guid,   S_IRUGO, show_ioc_guid,   NULL);
static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static CLASS_DEVICE_ATTR(pkey,       S_IRUGO, show_pkey,       NULL);
static CLASS_DEVICE_ATTR(dgid,       S_IRUGO, show_dgid,       NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.sg_tablesize			= SRP_MAX_INDIRECT,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
		return -ENODEV;

	mutex_lock(&host->target_mutex);
	list_add_tail(&target->list, &host->target_list);
	mutex_unlock(&host->target_mutex);

	target->state = SRP_TARGET_LIVE;

	/* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, ~0, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,	"id_ext=%s"	},
	{ SRP_OPT_IOC_GUID,	"ioc_guid=%s"	},
	{ SRP_OPT_DGID,		"dgid=%s"	},
	{ SRP_OPT_PKEY,		"pkey=%x"	},
	{ SRP_OPT_SERVICE_ID,	"service_id=%s"	},
	{ SRP_OPT_MAX_SECT,	"max_sect=%d"	},
	{ SRP_OPT_ERR,		NULL		}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

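/*
 * Store handler for the per-port add_target attribute: allocate a
 * Scsi_Host with a struct srp_target_port as hostdata, parse the
 * comma-separated parameters written by userspace, look up our source
 * GID, create the CQ/QP and CM ID, log in to the target and, if all of
 * that succeeds, register the new SCSI host and scan it.
 */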
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->scsi_host = target_host;
	target->srp_host = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

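/*
 * Set up one struct srp_host for a single HCA port: stash the port
 * number in the first half of the initiator port ID and the node GUID
 * in the second half, allocate a PD and a DMA memory region used for
 * all data transfers, and register a class device (srp-<device>-<port>)
 * carrying the add_target, ibdev and port attributes.
 */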
static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	mutex_init(&host->target_mutex);
	init_completion(&host->released);
	host->dev = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);

	host->pd = ib_alloc_pd(device);
	if (IS_ERR(host->pd))
		goto err_free;

	host->mr = ib_get_dma_mr(host->pd,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(host->mr))
		goto err_pd;

	host->class_dev.class = &srp_class;
	host->class_dev.dev = device->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->name, port);

	if (class_device_register(&host->class_dev))
		goto err_mr;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

err_mr:
	ib_dereg_mr(host->mr);

err_pd:
	ib_dealloc_pd(host->pd);

err_free:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(device, p);
		if (host)
			list_add_tail(&host->list, dev_list);
	}

	ib_set_client_data(device, &srp_client, dev_list);
}

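/*
 * Tear down everything srp_add_one() set up for a device that is going
 * away.  The ordering matters: unregister the class devices and wait
 * for host->released so no new targets can be added, mark existing
 * targets removed so queuecommand and the error handlers stop touching
 * them, flush any pending reconnect/removal work, and only then
 * disconnect and free each target and the per-port PD and MR.
 */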
static void srp_remove_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;
	unsigned long flags;

	dev_list = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		mutex_lock(&host->target_mutex);
		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state != SRP_TARGET_REMOVED)
				target->state = SRP_TARGET_REMOVED;
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		}
		mutex_unlock(&host->target_mutex);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		ib_dereg_mr(host->mr);
		ib_dealloc_pd(host->pd);
		kfree(host);
	}

	kfree(dev_list);
}

static int __init srp_init_module(void)
{
	int ret;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);