/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dma_device, iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dma_device, iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

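	/*
	 * Issue an SA path record query for the destination GID;
	 * srp_path_rec_completion() copies the result into target->path
	 * and signals target->done.
	 */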
	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	    |
						   IB_SA_PATH_REC_SGID	    |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path      = &target->path;
	req->param.alternate_path    = NULL;
	req->param.service_id        = target->service_id;
	req->param.qp_num            = target->qp->qp_num;
	req->param.qp_type           = target->qp->qp_type;
	req->param.private_data      = &req->priv;
	req->param.private_data_len  = sizeof req->priv;
	req->param.flow_control      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = 7;
	req->param.rnr_retry_count            = 7;
	req->param.max_cm_retries             = 15;

	req->priv.opcode        = SRP_LOGIN_REQ;
	req->priv.tag           = 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	ib_send_cm_dreq(target->cm_id, NULL, 0);
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		scsi_host_put(target->scsi_host);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	down(&target->srp_host->target_mutex);
	list_del(&target->list);
	up(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
	/* And another put to really free the target port... */
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
	}

	target->rx_head  = 0;
	target->tx_head  = 0;
	target->tx_tail  = 0;
	target->req_head = 0;
	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
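	 * Instead, mark the target DEAD and let srp_remove_work(),
	 * run from the shared workqueue, do the actual teardown.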
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct srp_cmd *cmd = req->cmd->buf;
	int len;
	u8 fmt;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	if (scmnd->use_sg) {
		struct scatterlist *scat = scmnd->request_buffer;
		int n;
		int i;

		n = dma_map_sg(target->srp_host->dev->dma_device,
			       scat, scmnd->use_sg, scmnd->sc_data_direction);

		if (n == 1) {
			struct srp_direct_buf *buf = (void *) cmd->add_data;

			fmt = SRP_DATA_DESC_DIRECT;

			buf->va  = cpu_to_be64(sg_dma_address(scat));
			buf->key = cpu_to_be32(target->srp_host->mr->rkey);
			buf->len = cpu_to_be32(sg_dma_len(scat));

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_direct_buf);
		} else {
			struct srp_indirect_buf *buf = (void *) cmd->add_data;
			u32 datalen = 0;

			fmt = SRP_DATA_DESC_INDIRECT;

			if (scmnd->sc_data_direction == DMA_TO_DEVICE)
				cmd->data_out_desc_cnt = n;
			else
				cmd->data_in_desc_cnt = n;

			buf->table_desc.va  = cpu_to_be64(req->cmd->dma +
							  sizeof *cmd +
							  sizeof *buf);
			buf->table_desc.key =
				cpu_to_be32(target->srp_host->mr->rkey);
			buf->table_desc.len =
				cpu_to_be32(n * sizeof (struct srp_direct_buf));

			for (i = 0; i < n; ++i) {
				buf->desc_list[i].va  = cpu_to_be64(sg_dma_address(&scat[i]));
				buf->desc_list[i].key =
					cpu_to_be32(target->srp_host->mr->rkey);
				buf->desc_list[i].len = cpu_to_be32(sg_dma_len(&scat[i]));

				datalen += sg_dma_len(&scat[i]);
			}

			buf->len = cpu_to_be32(datalen);

			len = sizeof (struct srp_cmd) +
				sizeof (struct srp_indirect_buf) +
				n * sizeof (struct srp_direct_buf);
		}
	} else {
		struct srp_direct_buf *buf = (void *) cmd->add_data;
		dma_addr_t dma;

		dma = dma_map_single(target->srp_host->dev->dma_device,
				     scmnd->request_buffer, scmnd->request_bufflen,
				     scmnd->sc_data_direction);
		if (dma_mapping_error(dma)) {
			printk(KERN_WARNING PFX "unable to map %p/%d (dir %d)\n",
			       scmnd->request_buffer, (int) scmnd->request_bufflen,
			       scmnd->sc_data_direction);
			return -EINVAL;
		}

		pci_unmap_addr_set(req, direct_mapping, dma);

		buf->va  = cpu_to_be64(dma);
		buf->key = cpu_to_be32(target->srp_host->mr->rkey);
		buf->len = cpu_to_be32(scmnd->request_bufflen);

		fmt = SRP_DATA_DESC_DIRECT;

		len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (scmnd->use_sg)
		dma_unmap_sg(target->srp_host->dev->dma_device,
			     (struct scatterlist *) scmnd->request_buffer,
			     scmnd->use_sg, scmnd->sc_data_direction);
	else
		dma_unmap_single(target->srp_host->dev->dma_device,
				 pci_unmap_addr(req, direct_mapping),
				 scmnd->request_bufflen,
				 scmnd->sc_data_direction);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		srp_unmap_data(scmnd, target, req);

		if (!req->tsk_mgmt) {
			req->scmnd = NULL;
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			list_del(&req->list);
			req->next = target->req_head;
			target->req_head = rsp->tag & ~SRP_TAG_TSK_MGMT;
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
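		/*
		 * A failed completion indicates the connection is broken;
		 * if the target is still live, queue target->work
		 * (srp_reconnect_work) to attempt a reconnect.
		 */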
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next     = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu       = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
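 *
 * Each send posted here consumes one SRP request-limit credit
 * (req_lim); credits are replenished from the req_lim_delta field of
 * responses in srp_process_rsp().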
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	long req_index;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req_index = target->req_head;

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req_index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req_index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req = &target->req_ring[req_index];

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	target->req_head = req->next;
	list_add_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int req_index;
	int ret = FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (scmnd->host_scribble == (void *) -1L)
		goto out;

	req_index = (long) scmnd->host_scribble;
	printk(KERN_ERR "Abort for req_index %d\n", req_index);

	req = &target->req_ring[req_index];
	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode        = SRP_TSK_MGMT;
	tsk_mgmt->lun           = cpu_to_be64((u64) scmnd->device->lun << 48);
	tsk_mgmt->tag           = req_index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag      = req_index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);
	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return FAILED;
	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		list_del(&req->list);
		req->next = target->req_head;
		target->req_head = req_index;

		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		scmnd->result = DID_ABORT << 16;
		ret = SUCCESS;
	}

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return ret;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP abort called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	printk(KERN_ERR "SRP reset_device called\n");

	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static struct scsi_host_template srp_template = {
	.module                  = THIS_MODULE,
	.name                    = DRV_NAME,
	.info                    = srp_target_info,
	.queuecommand            = srp_queuecommand,
	.eh_abort_handler        = srp_abort,
	.eh_device_reset_handler = srp_reset_device,
	.eh_host_reset_handler   = srp_reset_host,
	.can_queue               = SRP_SQ_SIZE,
	.this_id                 = -1,
	.sg_tablesize            = SRP_MAX_INDIRECT,
	.cmd_per_lun             = SRP_SQ_SIZE,
	.use_clustering          = ENABLE_CLUSTERING
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dma_device))
		return -ENODEV;

	down(&host->target_mutex);
	list_add_tail(&target->list, &host->target_list);
	up(&host->target_mutex);

	target->state = SRP_TARGET_LIVE;

	/* XXX: are we supposed to have a definition of SCAN_WILD_CARD ?? */
	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, ~0, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR        = 0,
	SRP_OPT_ID_EXT     = 1 << 0,
	SRP_OPT_IOC_GUID   = 1 << 1,
	SRP_OPT_DGID       = 1 << 2,
	SRP_OPT_PKEY       = 1 << 3,
	SRP_OPT_SERVICE_ID = 1 << 4,
	SRP_OPT_MAX_SECT   = 1 << 5,
	SRP_OPT_ALL        = (SRP_OPT_ID_EXT   |
			      SRP_OPT_IOC_GUID |
			      SRP_OPT_DGID     |
			      SRP_OPT_PKEY     |
			      SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,     "id_ext=%s"     },
	{ SRP_OPT_IOC_GUID,   "ioc_guid=%s"   },
	{ SRP_OPT_DGID,       "dgid=%s"       },
	{ SRP_OPT_PKEY,       "pkey=%x"       },
	{ SRP_OPT_SERVICE_ID, "service_id=%s" },
	{ SRP_OPT_MAX_SECT,   "max_sect=%d"   },
	{ SRP_OPT_ERR,        NULL            }
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
		target->req_ring[i].next = i + 1;
	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
	INIT_LIST_HEAD(&target->req_queue);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

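	/*
	 * Unwind in reverse order of setup: disconnect from the target,
	 * destroy the CM ID, free the IB resources, then drop the
	 * scsi_host reference.
	 */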
err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct ib_device *device,
				     __be64 node_guid, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	init_MUTEX(&host->target_mutex);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &node_guid, 8);

	host->pd = ib_alloc_pd(device);
	if (IS_ERR(host->pd))
		goto err_free;

	host->mr = ib_get_dma_mr(host->pd,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(host->mr))
		goto err_pd;

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->name, port);

	if (class_device_register(&host->class_dev))
		goto err_mr;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

err_mr:
	ib_dereg_mr(host->mr);

err_pd:
	ib_dealloc_pd(host->pd);

err_free:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host;
	struct ib_device_attr *dev_attr;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
		       device->name);
		goto out;
	}

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		goto out;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(device, dev_attr->node_guid, p);
		if (host)
			list_add_tail(&host->list, dev_list);
	}

	ib_set_client_data(device, &srp_client, dev_list);

out:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;
	unsigned long flags;

	dev_list = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		down(&host->target_mutex);
		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state != SRP_TARGET_REMOVED)
				target->state = SRP_TARGET_REMOVED;
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		}
		up(&host->target_mutex);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		ib_dereg_mr(host->mr);
		ib_dealloc_pd(host->pd);
		kfree(host);
	}

	kfree(dev_list);
}

static int __init srp_init_module(void)
{
	int ret;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);