/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

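/*
 * Allocate an information unit (IU): the descriptor itself, a data
 * buffer of the requested size, and a DMA mapping of that buffer for
 * the given direction.  Returns NULL if any step fails.
 */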
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = target->cq;
	init_attr->recv_cq = target->cq;

	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

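/*
 * Query the subnet administrator for a path record to the target and
 * wait for the answer; srp_path_rec_completion() stores the result (or
 * the error) in target->status.
 */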
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID |
						   IB_SA_PATH_REC_SGID |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

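/*
 * Build and send a CM REQ whose private data carries the SRP_LOGIN_REQ
 * for this target port.
 */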
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path = &target->path;
	req->param.alternate_path = NULL;
	req->param.service_id = target->service_id;
	req->param.qp_num = target->qp->qp_num;
	req->param.qp_type = target->qp->qp_type;
	req->param.private_data = &req->priv;
	req->param.private_data_len = sizeof req->priv;
	req->param.flow_control = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout = 20;
	req->param.retry_count = 7;
	req->param.rnr_retry_count = 7;
	req->param.max_cm_retries = 15;

	req->priv.opcode = SRP_LOGIN_REQ;
	req->priv.tag = 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					    SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id, &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ib_send_cm_dreq(target->cm_id, NULL, 0);
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		scsi_host_put(target->scsi_host);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	mutex_lock(&target->srp_host->target_mutex);
	list_del(&target->list);
	mutex_unlock(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
	/* And another put to really free the target port... */
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

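/*
 * Tear down the current connection, fail all outstanding requests with
 * DID_RESET, reset the QP and the rings, and log in to the target
 * again.  If that fails the target port is scheduled for removal via
 * srp_remove_work().
 */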
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
	}

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;
	target->req_head = 0;
	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

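/*
 * Map the data buffer of a SCSI command and fill in the direct or
 * indirect data buffer descriptor of the SRP_CMD.  Returns the total
 * length of the request IU, or a negative error code.
 */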
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	int i;
	u8 fmt;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
			   scmnd->sc_data_direction);

	if (count == 1) {
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		fmt = SRP_DATA_DESC_DIRECT;

		buf->va = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_direct_buf);
	} else {
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;

		fmt = SRP_DATA_DESC_INDIRECT;

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va = cpu_to_be64(req->cmd->dma +
						 sizeof *cmd +
						 sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va = cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->mr->rkey);
			buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));

			datalen += sg_dma_len(&scat[i]);
		}

		buf->len = cpu_to_be32(datalen);

		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}

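/*
 * Handle an SRP_RSP IU: update the request limit, then complete either
 * the task management request or the SCSI command that owns the tag
 * and put the request slot back on the free list.
 */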
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd) {
			/* Don't dereference a request slot that has no command. */
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			goto out;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			list_del(&req->list);
			req->next = target->req_head;
			target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
		} else
			req->cmd_done = 1;
	}

out:
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR " [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

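/*
 * Post a receive work request for the next rx_ring slot.  The __
 * version expects target->scsi_host->host_lock to be held, since it
 * updates rx_head.
 */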
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu = target->rx_ring[next];

	list.addr = iu->dma;
	list.length = iu->size;
	list.lkey = target->srp_host->mr->lkey;

	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr = iu->dma;
	list.length = len;
	list.lkey = target->srp_host->mr->lkey;

	wr.next = NULL;
	wr.wr_id = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

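/*
 * queuecommand entry point, called by the SCSI midlayer with the host
 * lock held: grab a TX IU and a request slot, build the SRP_CMD, map
 * the data, post a receive for the response and then post the send.
 */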
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done = done;
	scmnd->result = 0;
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd = scmnd;
	req->cmd = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

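/*
 * Decode a CM REJ: redirect rejects are turned into SRP_PORT_REDIRECT
 * or SRP_DLID_REDIRECT so that srp_connect_target() can retry;
 * everything else becomes -ECONNRESET.
 */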
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING " REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

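/*
 * Connection manager callback.  A REP (SRP login response) drives the
 * QP to RTR and RTS, posts the first receive and sends the RTU; REJs,
 * errors and disconnects just record a status.  target->done is
 * completed for the events that srp_connect_target() and
 * srp_disconnect_target() wait on.
 */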
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

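/*
 * Send an SRP task management request (abort task or LUN reset) for
 * the given command's tag and wait for the target to respond.
 */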
static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int req_index;
	int ret = FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	if (scmnd->host_scribble == (void *) -1L)
		goto out;

	req_index = (long) scmnd->host_scribble;
	printk(KERN_ERR "Abort for req_index %d\n", req_index);

	req = &target->req_ring[req_index];
	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode = SRP_TSK_MGMT;
	tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
	tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag = req_index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);
	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return FAILED;
	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		list_del(&req->list);
		req->next = target->req_head;
		target->req_head = req_index;

		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		scmnd->result = DID_ABORT << 16;
		ret = SUCCESS;
	}

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return ret;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP abort called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP reset_device called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	NULL
};

static struct scsi_host_template srp_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.info = srp_target_info,
	.queuecommand = srp_queuecommand,
	.eh_abort_handler = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler = srp_reset_host,
	.can_queue = SRP_SQ_SIZE,
	.this_id = -1,
	.sg_tablesize = SRP_MAX_INDIRECT,
	.cmd_per_lun = SRP_SQ_SIZE,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
		return -ENODEV;

	mutex_lock(&host->target_mutex);
	list_add_tail(&target->list, &host->target_list);
	mutex_unlock(&host->target_mutex);

	target->state = SRP_TARGET_LIVE;

	/* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, ~0, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR = 0,
	SRP_OPT_ID_EXT = 1 << 0,
	SRP_OPT_IOC_GUID = 1 << 1,
	SRP_OPT_DGID = 1 << 2,
	SRP_OPT_PKEY = 1 << 3,
	SRP_OPT_SERVICE_ID = 1 << 4,
	SRP_OPT_MAX_SECT = 1 << 5,
	SRP_OPT_ALL = (SRP_OPT_ID_EXT |
		       SRP_OPT_IOC_GUID |
		       SRP_OPT_DGID |
		       SRP_OPT_PKEY |
		       SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT, "id_ext=%s" },
	{ SRP_OPT_IOC_GUID, "ioc_guid=%s" },
	{ SRP_OPT_DGID, "dgid=%s" },
	{ SRP_OPT_PKEY, "pkey=%x" },
	{ SRP_OPT_SERVICE_ID, "service_id=%s" },
	{ SRP_OPT_MAX_SECT, "max_sect=%d" },
	{ SRP_OPT_ERR, NULL }
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

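/*
 * add_target sysfs handler: allocate a SCSI host and target port,
 * parse the options string, create the IB resources, log in to the
 * target and register it with the SCSI midlayer.
 */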
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->scsi_host = target_host;
	target->srp_host = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

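/*
 * Set up the SRP host for one port of an IB device: allocate the PD
 * and DMA MR and register the sysfs class device that exposes the
 * add_target attribute.
 */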
static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	mutex_init(&host->target_mutex);
	init_completion(&host->released);
	host->dev = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);

	host->pd = ib_alloc_pd(device);
	if (IS_ERR(host->pd))
		goto err_free;

	host->mr = ib_get_dma_mr(host->pd,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(host->mr))
		goto err_pd;

	host->class_dev.class = &srp_class;
	host->class_dev.dev = device->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->name, port);

	if (class_device_register(&host->class_dev))
		goto err_mr;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

err_mr:
	ib_dereg_mr(host->mr);

err_pd:
	ib_dealloc_pd(host->pd);

err_free:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(device, p);
		if (host)
			list_add_tail(&host->list, dev_list);
	}

	ib_set_client_data(device, &srp_client, dev_list);
}

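/*
 * IB client remove callback: tear down every target port and SRP host
 * that was created for this device.
 */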
static void srp_remove_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;
	unsigned long flags;

	dev_list = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		mutex_lock(&host->target_mutex);
		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state != SRP_TARGET_REMOVED)
				target->state = SRP_TARGET_REMOVED;
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		}
		mutex_unlock(&host->target_mutex);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		ib_dereg_mr(host->mr);
		ib_dealloc_pd(host->pd);
		kfree(host);
	}

	kfree(dev_list);
}

static int __init srp_init_module(void)
{
	int ret;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);