1 /* 2 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35 #include <linux/module.h> 36 #include <linux/hex.h> 37 #include <linux/init.h> 38 #include <linux/slab.h> 39 #include <linux/err.h> 40 #include <linux/string.h> 41 #include <linux/parser.h> 42 #include <linux/random.h> 43 #include <linux/jiffies.h> 44 #include <linux/lockdep.h> 45 #include <linux/inet.h> 46 #include <net/net_namespace.h> 47 #include <rdma/ib_cache.h> 48 49 #include <linux/atomic.h> 50 51 #include <scsi/scsi.h> 52 #include <scsi/scsi_device.h> 53 #include <scsi/scsi_dbg.h> 54 #include <scsi/scsi_tcq.h> 55 #include <scsi/srp.h> 56 #include <scsi/scsi_transport_srp.h> 57 58 #include "ib_srp.h" 59 60 #define DRV_NAME "ib_srp" 61 #define PFX DRV_NAME ": " 62 63 MODULE_AUTHOR("Roland Dreier"); 64 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); 65 MODULE_LICENSE("Dual BSD/GPL"); 66 67 static unsigned int srp_sg_tablesize; 68 static unsigned int cmd_sg_entries; 69 static unsigned int indirect_sg_entries; 70 static bool allow_ext_sg; 71 static bool register_always = true; 72 static bool never_register; 73 static int topspin_workarounds = 1; 74 75 module_param(srp_sg_tablesize, uint, 0444); 76 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries"); 77 78 module_param(cmd_sg_entries, uint, 0444); 79 MODULE_PARM_DESC(cmd_sg_entries, 80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)"); 81 82 module_param(indirect_sg_entries, uint, 0444); 83 MODULE_PARM_DESC(indirect_sg_entries, 84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")"); 85 86 module_param(allow_ext_sg, bool, 0444); 87 MODULE_PARM_DESC(allow_ext_sg, 88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)"); 89 90 module_param(topspin_workarounds, int, 0444); 91 MODULE_PARM_DESC(topspin_workarounds, 92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0"); 93 94 module_param(register_always, bool, 0444); 95 MODULE_PARM_DESC(register_always, 96 "Use memory registration 
even for contiguous memory regions"); 97 98 module_param(never_register, bool, 0444); 99 MODULE_PARM_DESC(never_register, "Never register memory"); 100 101 static const struct kernel_param_ops srp_tmo_ops; 102 103 static int srp_reconnect_delay = 10; 104 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, 105 S_IRUGO | S_IWUSR); 106 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts"); 107 108 static int srp_fast_io_fail_tmo = 15; 109 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo, 110 S_IRUGO | S_IWUSR); 111 MODULE_PARM_DESC(fast_io_fail_tmo, 112 "Number of seconds between the observation of a transport" 113 " layer error and failing all I/O. \"off\" means that this" 114 " functionality is disabled."); 115 116 static int srp_dev_loss_tmo = 600; 117 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo, 118 S_IRUGO | S_IWUSR); 119 MODULE_PARM_DESC(dev_loss_tmo, 120 "Maximum number of seconds that the SRP transport should" 121 " insulate transport layer errors. After this time has been" 122 " exceeded the SCSI host is removed. Should be" 123 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT) 124 " if fast_io_fail_tmo has not been set. \"off\" means that" 125 " this functionality is disabled."); 126 127 static bool srp_use_imm_data = true; 128 module_param_named(use_imm_data, srp_use_imm_data, bool, 0644); 129 MODULE_PARM_DESC(use_imm_data, 130 "Whether or not to request permission to use immediate data during SRP login."); 131 132 static unsigned int srp_max_imm_data = 8 * 1024; 133 module_param_named(max_imm_data, srp_max_imm_data, uint, 0644); 134 MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size."); 135 136 static unsigned ch_count; 137 module_param(ch_count, uint, 0444); 138 MODULE_PARM_DESC(ch_count, 139 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. 
The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof(*iu), gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
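/* Asynchronous QP event handler for the channel QP; events are only logged. */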
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE |
			   IB_QP_PKEY_INDEX |
			   IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device: IB device to allocate fast registration descriptors for.
 * @pd: Protection domain associated with the FR descriptors.
 * @pool_size: Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n: Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it. This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
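 *
 * The send CQ uses IB_POLL_DIRECT, so it is polled one last time under
 * ch->lock before ib_drain_qp() flushes the remaining work requests.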
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler = srp_qp_event;
	init_attr->cap.max_send_wr = m * target->queue_size;
	init_attr->cap.max_recv_wr = target->queue_size + 1;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr->qp_type = IB_QPT_RC;
	init_attr->send_cq = send_cq;
	init_attr->recv_cq = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
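 * Resources are released in this order: CM ID, fast registration pool,
 * QP and CQs, and finally the receive and transmit rings.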
637 */ 638 static void srp_free_ch_ib(struct srp_target_port *target, 639 struct srp_rdma_ch *ch) 640 { 641 struct srp_device *dev = target->srp_host->srp_dev; 642 int i; 643 644 if (!ch->target) 645 return; 646 647 if (target->using_rdma_cm) { 648 if (ch->rdma_cm.cm_id) { 649 rdma_destroy_id(ch->rdma_cm.cm_id); 650 ch->rdma_cm.cm_id = NULL; 651 } 652 } else { 653 if (ch->ib_cm.cm_id) { 654 ib_destroy_cm_id(ch->ib_cm.cm_id); 655 ch->ib_cm.cm_id = NULL; 656 } 657 } 658 659 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */ 660 if (!ch->qp) 661 return; 662 663 if (dev->use_fast_reg) { 664 if (ch->fr_pool) 665 srp_destroy_fr_pool(ch->fr_pool); 666 } 667 668 srp_destroy_qp(ch); 669 ib_free_cq(ch->send_cq); 670 ib_free_cq(ch->recv_cq); 671 672 /* 673 * Avoid that the SCSI error handler tries to use this channel after 674 * it has been freed. The SCSI error handler can namely continue 675 * trying to perform recovery actions after scsi_remove_host() 676 * returned. 677 */ 678 ch->target = NULL; 679 680 ch->qp = NULL; 681 ch->send_cq = ch->recv_cq = NULL; 682 683 if (ch->rx_ring) { 684 for (i = 0; i < target->queue_size; ++i) 685 srp_free_iu(target->srp_host, ch->rx_ring[i]); 686 kfree(ch->rx_ring); 687 ch->rx_ring = NULL; 688 } 689 if (ch->tx_ring) { 690 for (i = 0; i < target->queue_size; ++i) 691 srp_free_iu(target->srp_host, ch->tx_ring[i]); 692 kfree(ch->tx_ring); 693 ch->tx_ring = NULL; 694 } 695 } 696 697 static void srp_path_rec_completion(int status, 698 struct sa_path_rec *pathrec, 699 unsigned int num_paths, void *ch_ptr) 700 { 701 struct srp_rdma_ch *ch = ch_ptr; 702 struct srp_target_port *target = ch->target; 703 704 ch->status = status; 705 if (status) 706 shost_printk(KERN_ERR, target->scsi_host, 707 PFX "Got failed path rec status %d\n", status); 708 else 709 ch->ib_cm.path = *pathrec; 710 complete(&ch->done); 711 } 712 713 static int srp_ib_lookup_path(struct srp_rdma_ch *ch) 714 { 715 struct srp_target_port *target = ch->target; 716 int ret; 717 718 ch->ib_cm.path.numb_path = 1; 719 720 init_completion(&ch->done); 721 722 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, 723 target->srp_host->srp_dev->dev, 724 target->srp_host->port, 725 &ch->ib_cm.path, 726 IB_SA_PATH_REC_SERVICE_ID | 727 IB_SA_PATH_REC_DGID | 728 IB_SA_PATH_REC_SGID | 729 IB_SA_PATH_REC_NUMB_PATH | 730 IB_SA_PATH_REC_PKEY, 731 SRP_PATH_REC_TIMEOUT_MS, 732 GFP_KERNEL, 733 srp_path_rec_completion, 734 ch, &ch->ib_cm.path_query); 735 if (ch->ib_cm.path_query_id < 0) 736 return ch->ib_cm.path_query_id; 737 738 ret = wait_for_completion_interruptible(&ch->done); 739 if (ret < 0) 740 return ret; 741 742 if (ch->status < 0) 743 shost_printk(KERN_WARNING, target->scsi_host, 744 PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n", 745 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, 746 be16_to_cpu(target->ib_cm.pkey), 747 be64_to_cpu(target->ib_cm.service_id)); 748 749 return ch->status; 750 } 751 752 static int srp_rdma_lookup_path(struct srp_rdma_ch *ch) 753 { 754 struct srp_target_port *target = ch->target; 755 int ret; 756 757 init_completion(&ch->done); 758 759 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); 760 if (ret) 761 return ret; 762 763 wait_for_completion_interruptible(&ch->done); 764 765 if (ch->status != 0) 766 shost_printk(KERN_WARNING, target->scsi_host, 767 PFX "Path resolution failed\n"); 768 769 return ch->status; 770 } 771 772 static int srp_lookup_path(struct srp_rdma_ch *ch) 773 { 774 struct 
srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}

static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags = req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
878 * In the published SRP specification (draft rev. 16a), the 879 * port identifier format is 8 bytes of ID extension followed 880 * by 8 bytes of GUID. Older drafts put the two halves in the 881 * opposite order, so that the GUID comes first. 882 * 883 * Targets conforming to these obsolete drafts can be 884 * recognized by the I/O Class they report. 885 */ 886 if (target->io_class == SRP_REV10_IB_IO_CLASS) { 887 memcpy(ipi, &target->sgid.global.interface_id, 8); 888 memcpy(ipi + 8, &target->initiator_ext, 8); 889 memcpy(tpi, &target->ioc_guid, 8); 890 memcpy(tpi + 8, &target->id_ext, 8); 891 } else { 892 memcpy(ipi, &target->initiator_ext, 8); 893 memcpy(ipi + 8, &target->sgid.global.interface_id, 8); 894 memcpy(tpi, &target->id_ext, 8); 895 memcpy(tpi + 8, &target->ioc_guid, 8); 896 } 897 898 /* 899 * Topspin/Cisco SRP targets will reject our login unless we 900 * zero out the first 8 bytes of our initiator port ID and set 901 * the second 8 bytes to the local node GUID. 902 */ 903 if (srp_target_is_topspin(target)) { 904 shost_printk(KERN_DEBUG, target->scsi_host, 905 PFX "Topspin/Cisco initiator port ID workaround " 906 "activated for target GUID %016llx\n", 907 be64_to_cpu(target->ioc_guid)); 908 memset(ipi, 0, 8); 909 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); 910 } 911 912 if (target->using_rdma_cm) 913 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); 914 else 915 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); 916 917 kfree(req); 918 919 return status; 920 } 921 922 static bool srp_queue_remove_work(struct srp_target_port *target) 923 { 924 bool changed = false; 925 926 spin_lock_irq(&target->lock); 927 if (target->state != SRP_TARGET_REMOVED) { 928 target->state = SRP_TARGET_REMOVED; 929 changed = true; 930 } 931 spin_unlock_irq(&target->lock); 932 933 if (changed) 934 queue_work(srp_remove_wq, &target->remove_work); 935 936 return changed; 937 } 938 939 static void srp_disconnect_target(struct srp_target_port *target) 940 { 941 struct srp_rdma_ch *ch; 942 int i, ret; 943 944 /* XXX should send SRP_I_LOGOUT request */ 945 946 for (i = 0; i < target->ch_count; i++) { 947 ch = &target->ch[i]; 948 ch->connected = false; 949 ret = 0; 950 if (target->using_rdma_cm) { 951 if (ch->rdma_cm.cm_id) 952 rdma_disconnect(ch->rdma_cm.cm_id); 953 } else { 954 if (ch->ib_cm.cm_id) 955 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, 956 NULL, 0); 957 } 958 if (ret < 0) { 959 shost_printk(KERN_DEBUG, target->scsi_host, 960 PFX "Sending CM DREQ failed\n"); 961 } 962 } 963 } 964 965 static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 966 { 967 struct srp_target_port *target = host_to_target(shost); 968 struct srp_device *dev = target->srp_host->srp_dev; 969 struct ib_device *ibdev = dev->dev; 970 struct srp_request *req = scsi_cmd_priv(cmd); 971 972 kfree(req->fr_list); 973 if (req->indirect_dma_addr) { 974 ib_dma_unmap_single(ibdev, req->indirect_dma_addr, 975 target->indirect_size, 976 DMA_TO_DEVICE); 977 } 978 kfree(req->indirect_desc); 979 980 return 0; 981 } 982 983 static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 984 { 985 struct srp_target_port *target = host_to_target(shost); 986 struct srp_device *srp_dev = target->srp_host->srp_dev; 987 struct ib_device *ibdev = srp_dev->dev; 988 struct srp_request *req = scsi_cmd_priv(cmd); 989 dma_addr_t dma_addr; 990 int ret = -ENOMEM; 991 992 if (srp_dev->use_fast_reg) { 993 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), 994 GFP_KERNEL); 995 if 
(!req->fr_list) 996 goto out; 997 } 998 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); 999 if (!req->indirect_desc) 1000 goto out; 1001 1002 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, 1003 target->indirect_size, 1004 DMA_TO_DEVICE); 1005 if (ib_dma_mapping_error(ibdev, dma_addr)) { 1006 srp_exit_cmd_priv(shost, cmd); 1007 goto out; 1008 } 1009 1010 req->indirect_dma_addr = dma_addr; 1011 ret = 0; 1012 1013 out: 1014 return ret; 1015 } 1016 1017 /** 1018 * srp_del_scsi_host_attr() - Remove attributes defined in the host template. 1019 * @shost: SCSI host whose attributes to remove from sysfs. 1020 * 1021 * Note: Any attributes defined in the host template and that did not exist 1022 * before invocation of this function will be ignored. 1023 */ 1024 static void srp_del_scsi_host_attr(struct Scsi_Host *shost) 1025 { 1026 const struct attribute_group **g; 1027 struct attribute **attr; 1028 1029 for (g = shost->hostt->shost_groups; *g; ++g) { 1030 for (attr = (*g)->attrs; *attr; ++attr) { 1031 struct device_attribute *dev_attr = 1032 container_of(*attr, typeof(*dev_attr), attr); 1033 1034 device_remove_file(&shost->shost_dev, dev_attr); 1035 } 1036 } 1037 } 1038 1039 static void srp_remove_target(struct srp_target_port *target) 1040 { 1041 struct srp_rdma_ch *ch; 1042 int i; 1043 1044 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 1045 1046 srp_del_scsi_host_attr(target->scsi_host); 1047 srp_rport_get(target->rport); 1048 srp_remove_host(target->scsi_host); 1049 scsi_remove_host(target->scsi_host); 1050 srp_stop_rport_timers(target->rport); 1051 srp_disconnect_target(target); 1052 kobj_ns_drop(KOBJ_NS_TYPE_NET, to_ns_common(target->net)); 1053 for (i = 0; i < target->ch_count; i++) { 1054 ch = &target->ch[i]; 1055 srp_free_ch_ib(target, ch); 1056 } 1057 cancel_work_sync(&target->tl_err_work); 1058 srp_rport_put(target->rport); 1059 kfree(target->ch); 1060 target->ch = NULL; 1061 1062 spin_lock(&target->srp_host->target_lock); 1063 list_del(&target->list); 1064 spin_unlock(&target->srp_host->target_lock); 1065 1066 scsi_host_put(target->scsi_host); 1067 } 1068 1069 static void srp_remove_work(struct work_struct *work) 1070 { 1071 struct srp_target_port *target = 1072 container_of(work, struct srp_target_port, remove_work); 1073 1074 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); 1075 1076 srp_remove_target(target); 1077 } 1078 1079 static void srp_rport_delete(struct srp_rport *rport) 1080 { 1081 struct srp_target_port *target = rport->lld_data; 1082 1083 srp_queue_remove_work(target); 1084 } 1085 1086 /** 1087 * srp_connected_ch() - number of connected channels 1088 * @target: SRP target port. 
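 *
 * Return: the number of channels in @target->ch[] that have 'connected' set.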
1089 */ 1090 static int srp_connected_ch(struct srp_target_port *target) 1091 { 1092 int i, c = 0; 1093 1094 for (i = 0; i < target->ch_count; i++) 1095 c += target->ch[i].connected; 1096 1097 return c; 1098 } 1099 1100 static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len, 1101 bool multich) 1102 { 1103 struct srp_target_port *target = ch->target; 1104 int ret; 1105 1106 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); 1107 1108 ret = srp_lookup_path(ch); 1109 if (ret) 1110 goto out; 1111 1112 while (1) { 1113 init_completion(&ch->done); 1114 ret = srp_send_req(ch, max_iu_len, multich); 1115 if (ret) 1116 goto out; 1117 ret = wait_for_completion_interruptible(&ch->done); 1118 if (ret < 0) 1119 goto out; 1120 1121 /* 1122 * The CM event handling code will set status to 1123 * SRP_PORT_REDIRECT if we get a port redirect REJ 1124 * back, or SRP_DLID_REDIRECT if we get a lid/qp 1125 * redirect REJ back. 1126 */ 1127 ret = ch->status; 1128 switch (ret) { 1129 case 0: 1130 ch->connected = true; 1131 goto out; 1132 1133 case SRP_PORT_REDIRECT: 1134 ret = srp_lookup_path(ch); 1135 if (ret) 1136 goto out; 1137 break; 1138 1139 case SRP_DLID_REDIRECT: 1140 break; 1141 1142 case SRP_STALE_CONN: 1143 shost_printk(KERN_ERR, target->scsi_host, PFX 1144 "giving up on stale connection\n"); 1145 ret = -ECONNRESET; 1146 goto out; 1147 1148 default: 1149 goto out; 1150 } 1151 } 1152 1153 out: 1154 return ret <= 0 ? ret : -ENODEV; 1155 } 1156 1157 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc) 1158 { 1159 srp_handle_qp_err(cq, wc, "INV RKEY"); 1160 } 1161 1162 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch, 1163 u32 rkey) 1164 { 1165 struct ib_send_wr wr = { 1166 .opcode = IB_WR_LOCAL_INV, 1167 .next = NULL, 1168 .num_sge = 0, 1169 .send_flags = 0, 1170 .ex.invalidate_rkey = rkey, 1171 }; 1172 1173 wr.wr_cqe = &req->reg_cqe; 1174 req->reg_cqe.done = srp_inv_rkey_err_done; 1175 return ib_post_send(ch->qp, &wr, NULL); 1176 } 1177 1178 static void srp_unmap_data(struct scsi_cmnd *scmnd, 1179 struct srp_rdma_ch *ch, 1180 struct srp_request *req) 1181 { 1182 struct srp_target_port *target = ch->target; 1183 struct srp_device *dev = target->srp_host->srp_dev; 1184 struct ib_device *ibdev = dev->dev; 1185 int i, res; 1186 1187 if (!scsi_sglist(scmnd) || 1188 (scmnd->sc_data_direction != DMA_TO_DEVICE && 1189 scmnd->sc_data_direction != DMA_FROM_DEVICE)) 1190 return; 1191 1192 if (dev->use_fast_reg) { 1193 struct srp_fr_desc **pfr; 1194 1195 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { 1196 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); 1197 if (res < 0) { 1198 shost_printk(KERN_ERR, target->scsi_host, PFX 1199 "Queueing INV WR for rkey %#x failed (%d)\n", 1200 (*pfr)->mr->rkey, res); 1201 queue_work(system_long_wq, 1202 &target->tl_err_work); 1203 } 1204 } 1205 if (req->nmdesc) 1206 srp_fr_pool_put(ch->fr_pool, req->fr_list, 1207 req->nmdesc); 1208 } 1209 1210 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd), 1211 scmnd->sc_data_direction); 1212 } 1213 1214 /** 1215 * srp_claim_req - Take ownership of the scmnd associated with a request. 1216 * @ch: SRP RDMA channel. 1217 * @req: SRP request. 1218 * @sdev: If not NULL, only take ownership for this SCSI device. 1219 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 1220 * ownership of @req->scmnd if it equals @scmnd. 1221 * 1222 * Return value: 1223 * Either NULL or a pointer to the SCSI command the caller became owner of. 
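 *
 * ch->lock is taken and released inside this function.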
1224 */ 1225 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, 1226 struct srp_request *req, 1227 struct scsi_device *sdev, 1228 struct scsi_cmnd *scmnd) 1229 { 1230 unsigned long flags; 1231 1232 spin_lock_irqsave(&ch->lock, flags); 1233 if (req->scmnd && 1234 (!sdev || req->scmnd->device == sdev) && 1235 (!scmnd || req->scmnd == scmnd)) { 1236 scmnd = req->scmnd; 1237 req->scmnd = NULL; 1238 } else { 1239 scmnd = NULL; 1240 } 1241 spin_unlock_irqrestore(&ch->lock, flags); 1242 1243 return scmnd; 1244 } 1245 1246 /** 1247 * srp_free_req() - Unmap data and adjust ch->req_lim. 1248 * @ch: SRP RDMA channel. 1249 * @req: Request to be freed. 1250 * @scmnd: SCSI command associated with @req. 1251 * @req_lim_delta: Amount to be added to @target->req_lim. 1252 */ 1253 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, 1254 struct scsi_cmnd *scmnd, s32 req_lim_delta) 1255 { 1256 unsigned long flags; 1257 1258 srp_unmap_data(scmnd, ch, req); 1259 1260 spin_lock_irqsave(&ch->lock, flags); 1261 ch->req_lim += req_lim_delta; 1262 spin_unlock_irqrestore(&ch->lock, flags); 1263 } 1264 1265 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, 1266 struct scsi_device *sdev, int result) 1267 { 1268 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); 1269 1270 if (scmnd) { 1271 srp_free_req(ch, req, scmnd, 0); 1272 scmnd->result = result; 1273 scsi_done(scmnd); 1274 } 1275 } 1276 1277 struct srp_terminate_context { 1278 struct srp_target_port *srp_target; 1279 int scsi_result; 1280 }; 1281 1282 static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr) 1283 { 1284 struct srp_terminate_context *context = context_ptr; 1285 struct srp_target_port *target = context->srp_target; 1286 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd)); 1287 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; 1288 struct srp_request *req = scsi_cmd_priv(scmnd); 1289 1290 srp_finish_req(ch, req, NULL, context->scsi_result); 1291 1292 return true; 1293 } 1294 1295 static void srp_terminate_io(struct srp_rport *rport) 1296 { 1297 struct srp_target_port *target = rport->lld_data; 1298 struct srp_terminate_context context = { .srp_target = target, 1299 .scsi_result = DID_TRANSPORT_FAILFAST << 16 }; 1300 1301 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); 1302 } 1303 1304 /* Calculate maximum initiator to target information unit length. */ 1305 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data, 1306 uint32_t max_it_iu_size) 1307 { 1308 uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN + 1309 sizeof(struct srp_indirect_buf) + 1310 cmd_sg_cnt * sizeof(struct srp_direct_buf); 1311 1312 if (use_imm_data) 1313 max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET + 1314 srp_max_imm_data); 1315 1316 if (max_it_iu_size) 1317 max_iu_len = min(max_iu_len, max_it_iu_size); 1318 1319 pr_debug("max_iu_len = %d\n", max_iu_len); 1320 1321 return max_iu_len; 1322 } 1323 1324 /* 1325 * It is up to the caller to ensure that srp_rport_reconnect() calls are 1326 * serialized and that no concurrent srp_queuecommand(), srp_abort(), 1327 * srp_reset_device() or srp_reset_host() calls will occur while this function 1328 * is in progress. One way to realize that is not to call this function 1329 * directly but to call srp_reconnect_rport() instead since that last function 1330 * serializes calls of this function via rport->mutex and also blocks 1331 * srp_queuecommand() calls before invoking this function. 
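 *
 * The sequence below is: disconnect, create new CM IDs, finish all
 * outstanding requests with DID_RESET, recreate the channel QPs and the
 * TX rings, and finally reconnect each channel.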
1332 */ 1333 static int srp_rport_reconnect(struct srp_rport *rport) 1334 { 1335 struct srp_target_port *target = rport->lld_data; 1336 struct srp_rdma_ch *ch; 1337 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, 1338 srp_use_imm_data, 1339 target->max_it_iu_size); 1340 int i, j, ret = 0; 1341 bool multich = false; 1342 1343 srp_disconnect_target(target); 1344 1345 if (target->state == SRP_TARGET_SCANNING) 1346 return -ENODEV; 1347 1348 /* 1349 * Now get a new local CM ID so that we avoid confusing the target in 1350 * case things are really fouled up. Doing so also ensures that all CM 1351 * callbacks will have finished before a new QP is allocated. 1352 */ 1353 for (i = 0; i < target->ch_count; i++) { 1354 ch = &target->ch[i]; 1355 ret += srp_new_cm_id(ch); 1356 } 1357 { 1358 struct srp_terminate_context context = { 1359 .srp_target = target, .scsi_result = DID_RESET << 16}; 1360 1361 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, 1362 &context); 1363 } 1364 for (i = 0; i < target->ch_count; i++) { 1365 ch = &target->ch[i]; 1366 /* 1367 * Whether or not creating a new CM ID succeeded, create a new 1368 * QP. This guarantees that all completion callback function 1369 * invocations have finished before request resetting starts. 1370 */ 1371 ret += srp_create_ch_ib(ch); 1372 1373 INIT_LIST_HEAD(&ch->free_tx); 1374 for (j = 0; j < target->queue_size; ++j) 1375 list_add(&ch->tx_ring[j]->list, &ch->free_tx); 1376 } 1377 1378 target->qp_in_error = false; 1379 1380 for (i = 0; i < target->ch_count; i++) { 1381 ch = &target->ch[i]; 1382 if (ret) 1383 break; 1384 ret = srp_connect_ch(ch, max_iu_len, multich); 1385 multich = true; 1386 } 1387 1388 if (ret == 0) 1389 shost_printk(KERN_INFO, target->scsi_host, 1390 PFX "reconnect succeeded\n"); 1391 1392 return ret; 1393 } 1394 1395 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, 1396 unsigned int dma_len, u32 rkey) 1397 { 1398 struct srp_direct_buf *desc = state->desc; 1399 1400 WARN_ON_ONCE(!dma_len); 1401 1402 desc->va = cpu_to_be64(dma_addr); 1403 desc->key = cpu_to_be32(rkey); 1404 desc->len = cpu_to_be32(dma_len); 1405 1406 state->total_len += dma_len; 1407 state->desc++; 1408 state->ndesc++; 1409 } 1410 1411 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc) 1412 { 1413 srp_handle_qp_err(cq, wc, "FAST REG"); 1414 } 1415 1416 /* 1417 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset 1418 * where to start in the first element. If sg_offset_p != NULL then 1419 * *sg_offset_p is updated to the offset in state->sg[retval] of the first 1420 * byte that has not yet been mapped. 1421 */ 1422 static int srp_map_finish_fr(struct srp_map_state *state, 1423 struct srp_request *req, 1424 struct srp_rdma_ch *ch, int sg_nents, 1425 unsigned int *sg_offset_p) 1426 { 1427 struct srp_target_port *target = ch->target; 1428 struct srp_device *dev = target->srp_host->srp_dev; 1429 struct ib_reg_wr wr; 1430 struct srp_fr_desc *desc; 1431 u32 rkey; 1432 int n, err; 1433 1434 if (state->fr.next >= state->fr.end) { 1435 shost_printk(KERN_ERR, ch->target->scsi_host, 1436 PFX "Out of MRs (mr_per_cmd = %d)\n", 1437 ch->target->mr_per_cmd); 1438 return -ENOMEM; 1439 } 1440 1441 WARN_ON_ONCE(!dev->use_fast_reg); 1442 1443 if (sg_nents == 1 && target->global_rkey) { 1444 unsigned int sg_offset = sg_offset_p ? 
*sg_offset_p : 0; 1445 1446 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, 1447 sg_dma_len(state->sg) - sg_offset, 1448 target->global_rkey); 1449 if (sg_offset_p) 1450 *sg_offset_p = 0; 1451 return 1; 1452 } 1453 1454 desc = srp_fr_pool_get(ch->fr_pool); 1455 if (!desc) 1456 return -ENOMEM; 1457 1458 rkey = ib_inc_rkey(desc->mr->rkey); 1459 ib_update_fast_reg_key(desc->mr, rkey); 1460 1461 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, 1462 dev->mr_page_size); 1463 if (unlikely(n < 0)) { 1464 srp_fr_pool_put(ch->fr_pool, &desc, 1); 1465 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n", 1466 dev_name(&req->scmnd->device->sdev_gendev), sg_nents, 1467 sg_offset_p ? *sg_offset_p : -1, n); 1468 return n; 1469 } 1470 1471 WARN_ON_ONCE(desc->mr->length == 0); 1472 1473 req->reg_cqe.done = srp_reg_mr_err_done; 1474 1475 wr.wr.next = NULL; 1476 wr.wr.opcode = IB_WR_REG_MR; 1477 wr.wr.wr_cqe = &req->reg_cqe; 1478 wr.wr.num_sge = 0; 1479 wr.wr.send_flags = 0; 1480 wr.mr = desc->mr; 1481 wr.key = desc->mr->rkey; 1482 wr.access = (IB_ACCESS_LOCAL_WRITE | 1483 IB_ACCESS_REMOTE_READ | 1484 IB_ACCESS_REMOTE_WRITE); 1485 1486 *state->fr.next++ = desc; 1487 state->nmdesc++; 1488 1489 srp_map_desc(state, desc->mr->iova, 1490 desc->mr->length, desc->mr->rkey); 1491 1492 err = ib_post_send(ch->qp, &wr.wr, NULL); 1493 if (unlikely(err)) { 1494 WARN_ON_ONCE(err == -ENOMEM); 1495 return err; 1496 } 1497 1498 return n; 1499 } 1500 1501 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, 1502 struct srp_request *req, struct scatterlist *scat, 1503 int count) 1504 { 1505 unsigned int sg_offset = 0; 1506 1507 state->fr.next = req->fr_list; 1508 state->fr.end = req->fr_list + ch->target->mr_per_cmd; 1509 state->sg = scat; 1510 1511 if (count == 0) 1512 return 0; 1513 1514 while (count) { 1515 int i, n; 1516 1517 n = srp_map_finish_fr(state, req, ch, count, &sg_offset); 1518 if (unlikely(n < 0)) 1519 return n; 1520 1521 count -= n; 1522 for (i = 0; i < n; i++) 1523 state->sg = sg_next(state->sg); 1524 } 1525 1526 return 0; 1527 } 1528 1529 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, 1530 struct srp_request *req, struct scatterlist *scat, 1531 int count) 1532 { 1533 struct srp_target_port *target = ch->target; 1534 struct scatterlist *sg; 1535 int i; 1536 1537 for_each_sg(scat, sg, count, i) { 1538 srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg), 1539 target->global_rkey); 1540 } 1541 1542 return 0; 1543 } 1544 1545 /* 1546 * Register the indirect data buffer descriptor with the HCA. 1547 * 1548 * Note: since the indirect data buffer descriptor has been allocated with 1549 * kmalloc() it is guaranteed that this buffer is a physically contiguous 1550 * memory buffer. 
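 * Because of that, the descriptor can be registered with a single FR work
 * request via the one-entry sg-list set up with sg_init_one() below.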
1551 */ 1552 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, 1553 void **next_mr, void **end_mr, u32 idb_len, 1554 __be32 *idb_rkey) 1555 { 1556 struct srp_target_port *target = ch->target; 1557 struct srp_device *dev = target->srp_host->srp_dev; 1558 struct srp_map_state state; 1559 struct srp_direct_buf idb_desc; 1560 struct scatterlist idb_sg[1]; 1561 int ret; 1562 1563 memset(&state, 0, sizeof(state)); 1564 memset(&idb_desc, 0, sizeof(idb_desc)); 1565 state.gen.next = next_mr; 1566 state.gen.end = end_mr; 1567 state.desc = &idb_desc; 1568 state.base_dma_addr = req->indirect_dma_addr; 1569 state.dma_len = idb_len; 1570 1571 if (dev->use_fast_reg) { 1572 state.sg = idb_sg; 1573 sg_init_one(idb_sg, req->indirect_desc, idb_len); 1574 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ 1575 #ifdef CONFIG_NEED_SG_DMA_LENGTH 1576 idb_sg->dma_length = idb_sg->length; /* hack^2 */ 1577 #endif 1578 ret = srp_map_finish_fr(&state, req, ch, 1, NULL); 1579 if (ret < 0) 1580 return ret; 1581 WARN_ON_ONCE(ret < 1); 1582 } else { 1583 return -EINVAL; 1584 } 1585 1586 *idb_rkey = idb_desc.key; 1587 1588 return 0; 1589 } 1590 1591 static void srp_check_mapping(struct srp_map_state *state, 1592 struct srp_rdma_ch *ch, struct srp_request *req, 1593 struct scatterlist *scat, int count) 1594 { 1595 struct srp_device *dev = ch->target->srp_host->srp_dev; 1596 struct srp_fr_desc **pfr; 1597 u64 desc_len = 0, mr_len = 0; 1598 int i; 1599 1600 for (i = 0; i < state->ndesc; i++) 1601 desc_len += be32_to_cpu(req->indirect_desc[i].len); 1602 if (dev->use_fast_reg) 1603 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) 1604 mr_len += (*pfr)->mr->length; 1605 if (desc_len != scsi_bufflen(req->scmnd) || 1606 mr_len > scsi_bufflen(req->scmnd)) 1607 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n", 1608 scsi_bufflen(req->scmnd), desc_len, mr_len, 1609 state->ndesc, state->nmdesc); 1610 } 1611 1612 /** 1613 * srp_map_data() - map SCSI data buffer onto an SRP request 1614 * @scmnd: SCSI command to map 1615 * @ch: SRP RDMA channel 1616 * @req: SRP request 1617 * 1618 * Returns the length in bytes of the SRP_CMD IU or a negative value if 1619 * mapping failed. The size of any immediate data is not included in the 1620 * return value. 
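 * If mapping fails, any partially established mapping has already been
 * undone before this function returns.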
1621 */ 1622 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, 1623 struct srp_request *req) 1624 { 1625 struct srp_target_port *target = ch->target; 1626 struct scatterlist *scat, *sg; 1627 struct srp_cmd *cmd = req->cmd->buf; 1628 int i, len, nents, count, ret; 1629 struct srp_device *dev; 1630 struct ib_device *ibdev; 1631 struct srp_map_state state; 1632 struct srp_indirect_buf *indirect_hdr; 1633 u64 data_len; 1634 u32 idb_len, table_len; 1635 __be32 idb_rkey; 1636 u8 fmt; 1637 1638 req->cmd->num_sge = 1; 1639 1640 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) 1641 return sizeof(struct srp_cmd) + cmd->add_cdb_len; 1642 1643 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && 1644 scmnd->sc_data_direction != DMA_TO_DEVICE) { 1645 shost_printk(KERN_WARNING, target->scsi_host, 1646 PFX "Unhandled data direction %d\n", 1647 scmnd->sc_data_direction); 1648 return -EINVAL; 1649 } 1650 1651 nents = scsi_sg_count(scmnd); 1652 scat = scsi_sglist(scmnd); 1653 data_len = scsi_bufflen(scmnd); 1654 1655 dev = target->srp_host->srp_dev; 1656 ibdev = dev->dev; 1657 1658 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 1659 if (unlikely(count == 0)) 1660 return -EIO; 1661 1662 if (ch->use_imm_data && 1663 count <= ch->max_imm_sge && 1664 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && 1665 scmnd->sc_data_direction == DMA_TO_DEVICE) { 1666 struct srp_imm_buf *buf; 1667 struct ib_sge *sge = &req->cmd->sge[1]; 1668 1669 fmt = SRP_DATA_DESC_IMM; 1670 len = SRP_IMM_DATA_OFFSET; 1671 req->nmdesc = 0; 1672 buf = (void *)cmd->add_data + cmd->add_cdb_len; 1673 buf->len = cpu_to_be32(data_len); 1674 WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len); 1675 for_each_sg(scat, sg, count, i) { 1676 sge[i].addr = sg_dma_address(sg); 1677 sge[i].length = sg_dma_len(sg); 1678 sge[i].lkey = target->lkey; 1679 } 1680 req->cmd->num_sge += count; 1681 goto map_complete; 1682 } 1683 1684 fmt = SRP_DATA_DESC_DIRECT; 1685 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + 1686 sizeof(struct srp_direct_buf); 1687 1688 if (count == 1 && target->global_rkey) { 1689 /* 1690 * The midlayer only generated a single gather/scatter 1691 * entry, or DMA mapping coalesced everything to a 1692 * single entry. So a direct descriptor along with 1693 * the DMA MR suffices. 1694 */ 1695 struct srp_direct_buf *buf; 1696 1697 buf = (void *)cmd->add_data + cmd->add_cdb_len; 1698 buf->va = cpu_to_be64(sg_dma_address(scat)); 1699 buf->key = cpu_to_be32(target->global_rkey); 1700 buf->len = cpu_to_be32(sg_dma_len(scat)); 1701 1702 req->nmdesc = 0; 1703 goto map_complete; 1704 } 1705 1706 /* 1707 * We have more than one scatter/gather entry, so build our indirect 1708 * descriptor table, trying to merge as many entries as we can. 
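 * Each table entry is a struct srp_direct_buf (64-bit virtual address,
 * rkey and length) as filled in by srp_map_desc().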
1709 */ 1710 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len; 1711 1712 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, 1713 target->indirect_size, DMA_TO_DEVICE); 1714 1715 memset(&state, 0, sizeof(state)); 1716 state.desc = req->indirect_desc; 1717 if (dev->use_fast_reg) 1718 ret = srp_map_sg_fr(&state, ch, req, scat, count); 1719 else 1720 ret = srp_map_sg_dma(&state, ch, req, scat, count); 1721 req->nmdesc = state.nmdesc; 1722 if (ret < 0) 1723 goto unmap; 1724 1725 { 1726 DEFINE_DYNAMIC_DEBUG_METADATA(ddm, 1727 "Memory mapping consistency check"); 1728 if (DYNAMIC_DEBUG_BRANCH(ddm)) 1729 srp_check_mapping(&state, ch, req, scat, count); 1730 } 1731 1732 /* We've mapped the request, now pull as much of the indirect 1733 * descriptor table as we can into the command buffer. If this 1734 * target is not using an external indirect table, we are 1735 * guaranteed to fit into the command, as the SCSI layer won't 1736 * give us more S/G entries than we allow. 1737 */ 1738 if (state.ndesc == 1) { 1739 /* 1740 * Memory registration collapsed the sg-list into one entry, 1741 * so use a direct descriptor. 1742 */ 1743 struct srp_direct_buf *buf; 1744 1745 buf = (void *)cmd->add_data + cmd->add_cdb_len; 1746 *buf = req->indirect_desc[0]; 1747 goto map_complete; 1748 } 1749 1750 if (unlikely(target->cmd_sg_cnt < state.ndesc && 1751 !target->allow_ext_sg)) { 1752 shost_printk(KERN_ERR, target->scsi_host, 1753 "Could not fit S/G list into SRP_CMD\n"); 1754 ret = -EIO; 1755 goto unmap; 1756 } 1757 1758 count = min(state.ndesc, target->cmd_sg_cnt); 1759 table_len = state.ndesc * sizeof (struct srp_direct_buf); 1760 idb_len = sizeof(struct srp_indirect_buf) + table_len; 1761 1762 fmt = SRP_DATA_DESC_INDIRECT; 1763 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + 1764 sizeof(struct srp_indirect_buf); 1765 len += count * sizeof (struct srp_direct_buf); 1766 1767 memcpy(indirect_hdr->desc_list, req->indirect_desc, 1768 count * sizeof (struct srp_direct_buf)); 1769 1770 if (!target->global_rkey) { 1771 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1772 idb_len, &idb_rkey); 1773 if (ret < 0) 1774 goto unmap; 1775 req->nmdesc++; 1776 } else { 1777 idb_rkey = cpu_to_be32(target->global_rkey); 1778 } 1779 1780 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1781 indirect_hdr->table_desc.key = idb_rkey; 1782 indirect_hdr->table_desc.len = cpu_to_be32(table_len); 1783 indirect_hdr->len = cpu_to_be32(state.total_len); 1784 1785 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1786 cmd->data_out_desc_cnt = count; 1787 else 1788 cmd->data_in_desc_cnt = count; 1789 1790 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, 1791 DMA_TO_DEVICE); 1792 1793 map_complete: 1794 if (scmnd->sc_data_direction == DMA_TO_DEVICE) 1795 cmd->buf_fmt = fmt << 4; 1796 else 1797 cmd->buf_fmt = fmt; 1798 1799 return len; 1800 1801 unmap: 1802 srp_unmap_data(scmnd, ch, req); 1803 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) 1804 ret = -E2BIG; 1805 return ret; 1806 } 1807 1808 /* 1809 * Return an IU and possible credit to the free pool 1810 */ 1811 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, 1812 enum srp_iu_type iu_type) 1813 { 1814 unsigned long flags; 1815 1816 spin_lock_irqsave(&ch->lock, flags); 1817 list_add(&iu->list, &ch->free_tx); 1818 if (iu_type != SRP_IU_RSP) 1819 ++ch->req_lim; 1820 spin_unlock_irqrestore(&ch->lock, flags); 1821 } 1822 1823 /* 1824 * Must be called with ch->lock held to protect req_lim and free_tx. 
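 * __srp_get_tx_iu() also polls the send CQ directly under this lock in
 * order to reclaim information units whose send has completed.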
1825 * If IU is not sent, it must be returned using srp_put_tx_iu(). 1826 * 1827 * Note: 1828 * An upper limit for the number of allocated information units for each 1829 * request type is: 1830 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues 1831 * more than Scsi_Host.can_queue requests. 1832 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. 1833 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than 1834 * one unanswered SRP request to an initiator. 1835 */ 1836 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, 1837 enum srp_iu_type iu_type) 1838 { 1839 struct srp_target_port *target = ch->target; 1840 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; 1841 struct srp_iu *iu; 1842 1843 lockdep_assert_held(&ch->lock); 1844 1845 ib_process_cq_direct(ch->send_cq, -1); 1846 1847 if (list_empty(&ch->free_tx)) 1848 return NULL; 1849 1850 /* Initiator responses to target requests do not consume credits */ 1851 if (iu_type != SRP_IU_RSP) { 1852 if (ch->req_lim <= rsv) { 1853 ++target->zero_req_lim; 1854 return NULL; 1855 } 1856 1857 --ch->req_lim; 1858 } 1859 1860 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); 1861 list_del(&iu->list); 1862 return iu; 1863 } 1864 1865 /* 1866 * Note: if this function is called from inside ib_drain_sq() then it will 1867 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE 1868 * with status IB_WC_SUCCESS then that's a bug. 1869 */ 1870 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc) 1871 { 1872 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); 1873 struct srp_rdma_ch *ch = cq->cq_context; 1874 1875 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1876 srp_handle_qp_err(cq, wc, "SEND"); 1877 return; 1878 } 1879 1880 lockdep_assert_held(&ch->lock); 1881 1882 list_add(&iu->list, &ch->free_tx); 1883 } 1884 1885 /** 1886 * srp_post_send() - send an SRP information unit 1887 * @ch: RDMA channel over which to send the information unit. 1888 * @iu: Information unit to send. 1889 * @len: Length of the information unit excluding immediate data. 
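 *
 * The information unit is posted as a single signaled IB_WR_SEND work
 * request.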
1890 */ 1891 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) 1892 { 1893 struct srp_target_port *target = ch->target; 1894 struct ib_send_wr wr; 1895 1896 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) 1897 return -EINVAL; 1898 1899 iu->sge[0].addr = iu->dma; 1900 iu->sge[0].length = len; 1901 iu->sge[0].lkey = target->lkey; 1902 1903 iu->cqe.done = srp_send_done; 1904 1905 wr.next = NULL; 1906 wr.wr_cqe = &iu->cqe; 1907 wr.sg_list = &iu->sge[0]; 1908 wr.num_sge = iu->num_sge; 1909 wr.opcode = IB_WR_SEND; 1910 wr.send_flags = IB_SEND_SIGNALED; 1911 1912 return ib_post_send(ch->qp, &wr, NULL); 1913 } 1914 1915 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) 1916 { 1917 struct srp_target_port *target = ch->target; 1918 struct ib_recv_wr wr; 1919 struct ib_sge list; 1920 1921 list.addr = iu->dma; 1922 list.length = iu->size; 1923 list.lkey = target->lkey; 1924 1925 iu->cqe.done = srp_recv_done; 1926 1927 wr.next = NULL; 1928 wr.wr_cqe = &iu->cqe; 1929 wr.sg_list = &list; 1930 wr.num_sge = 1; 1931 1932 return ib_post_recv(ch->qp, &wr, NULL); 1933 } 1934 1935 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) 1936 { 1937 struct srp_target_port *target = ch->target; 1938 struct srp_request *req; 1939 struct scsi_cmnd *scmnd; 1940 unsigned long flags; 1941 1942 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 1943 spin_lock_irqsave(&ch->lock, flags); 1944 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1945 if (rsp->tag == ch->tsk_mgmt_tag) { 1946 ch->tsk_mgmt_status = -1; 1947 if (be32_to_cpu(rsp->resp_data_len) >= 4) 1948 ch->tsk_mgmt_status = rsp->data[3]; 1949 complete(&ch->tsk_mgmt_done); 1950 } else { 1951 shost_printk(KERN_ERR, target->scsi_host, 1952 "Received tsk mgmt response too late for tag %#llx\n", 1953 rsp->tag); 1954 } 1955 spin_unlock_irqrestore(&ch->lock, flags); 1956 } else { 1957 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); 1958 if (scmnd) { 1959 req = scsi_cmd_priv(scmnd); 1960 scmnd = srp_claim_req(ch, req, NULL, scmnd); 1961 } 1962 if (!scmnd) { 1963 shost_printk(KERN_ERR, target->scsi_host, 1964 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", 1965 rsp->tag, ch - target->ch, ch->qp->qp_num); 1966 1967 spin_lock_irqsave(&ch->lock, flags); 1968 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); 1969 spin_unlock_irqrestore(&ch->lock, flags); 1970 1971 return; 1972 } 1973 scmnd->result = rsp->status; 1974 1975 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { 1976 memcpy(scmnd->sense_buffer, rsp->data + 1977 be32_to_cpu(rsp->resp_data_len), 1978 min_t(int, be32_to_cpu(rsp->sense_data_len), 1979 SCSI_SENSE_BUFFERSIZE)); 1980 } 1981 1982 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) 1983 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1984 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) 1985 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); 1986 1987 srp_free_req(ch, req, scmnd, 1988 be32_to_cpu(rsp->req_lim_delta)); 1989 1990 scsi_done(scmnd); 1991 } 1992 } 1993 1994 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, 1995 void *rsp, int len) 1996 { 1997 struct srp_target_port *target = ch->target; 1998 struct ib_device *dev = target->srp_host->srp_dev->dev; 1999 unsigned long flags; 2000 struct srp_iu *iu; 2001 int err; 2002 2003 spin_lock_irqsave(&ch->lock, flags); 2004 ch->req_lim += req_delta; 2005 iu = __srp_get_tx_iu(ch, SRP_IU_RSP); 2006 spin_unlock_irqrestore(&ch->lock, flags); 2007 2008 if (!iu) { 2009 shost_printk(KERN_ERR, 
target->scsi_host, PFX 2010 "no IU available to send response\n"); 2011 return 1; 2012 } 2013 2014 iu->num_sge = 1; 2015 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 2016 memcpy(iu->buf, rsp, len); 2017 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 2018 2019 err = srp_post_send(ch, iu, len); 2020 if (err) { 2021 shost_printk(KERN_ERR, target->scsi_host, PFX 2022 "unable to post response: %d\n", err); 2023 srp_put_tx_iu(ch, iu, SRP_IU_RSP); 2024 } 2025 2026 return err; 2027 } 2028 2029 static void srp_process_cred_req(struct srp_rdma_ch *ch, 2030 struct srp_cred_req *req) 2031 { 2032 struct srp_cred_rsp rsp = { 2033 .opcode = SRP_CRED_RSP, 2034 .tag = req->tag, 2035 }; 2036 s32 delta = be32_to_cpu(req->req_lim_delta); 2037 2038 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 2039 shost_printk(KERN_ERR, ch->target->scsi_host, PFX 2040 "problems processing SRP_CRED_REQ\n"); 2041 } 2042 2043 static void srp_process_aer_req(struct srp_rdma_ch *ch, 2044 struct srp_aer_req *req) 2045 { 2046 struct srp_target_port *target = ch->target; 2047 struct srp_aer_rsp rsp = { 2048 .opcode = SRP_AER_RSP, 2049 .tag = req->tag, 2050 }; 2051 s32 delta = be32_to_cpu(req->req_lim_delta); 2052 2053 shost_printk(KERN_ERR, target->scsi_host, PFX 2054 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); 2055 2056 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) 2057 shost_printk(KERN_ERR, target->scsi_host, PFX 2058 "problems processing SRP_AER_REQ\n"); 2059 } 2060 2061 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2062 { 2063 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); 2064 struct srp_rdma_ch *ch = cq->cq_context; 2065 struct srp_target_port *target = ch->target; 2066 struct ib_device *dev = target->srp_host->srp_dev->dev; 2067 int res; 2068 u8 opcode; 2069 2070 if (unlikely(wc->status != IB_WC_SUCCESS)) { 2071 srp_handle_qp_err(cq, wc, "RECV"); 2072 return; 2073 } 2074 2075 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, 2076 DMA_FROM_DEVICE); 2077 2078 opcode = *(u8 *) iu->buf; 2079 2080 if (0) { 2081 shost_printk(KERN_ERR, target->scsi_host, 2082 PFX "recv completion, opcode 0x%02x\n", opcode); 2083 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, 2084 iu->buf, wc->byte_len, true); 2085 } 2086 2087 switch (opcode) { 2088 case SRP_RSP: 2089 srp_process_rsp(ch, iu->buf); 2090 break; 2091 2092 case SRP_CRED_REQ: 2093 srp_process_cred_req(ch, iu->buf); 2094 break; 2095 2096 case SRP_AER_REQ: 2097 srp_process_aer_req(ch, iu->buf); 2098 break; 2099 2100 case SRP_T_LOGOUT: 2101 /* XXX Handle target logout */ 2102 shost_printk(KERN_WARNING, target->scsi_host, 2103 PFX "Got target logout request\n"); 2104 break; 2105 2106 default: 2107 shost_printk(KERN_WARNING, target->scsi_host, 2108 PFX "Unhandled SRP opcode 0x%02x\n", opcode); 2109 break; 2110 } 2111 2112 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, 2113 DMA_FROM_DEVICE); 2114 2115 res = srp_post_recv(ch, iu); 2116 if (res != 0) 2117 shost_printk(KERN_ERR, target->scsi_host, 2118 PFX "Recv failed with error code %d\n", res); 2119 } 2120 2121 /** 2122 * srp_tl_err_work() - handle a transport layer error 2123 * @work: Work structure embedded in an SRP target port. 2124 * 2125 * Note: This function may get invoked before the rport has been created, 2126 * hence the target->rport test. 
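 *
 * The work is queued on system_long_wq by srp_handle_qp_err() and by the
 * CM event handlers; all it does is start the transport layer failure
 * timers via srp_start_tl_fail_timers().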
2127 */
2128 static void srp_tl_err_work(struct work_struct *work)
2129 {
2130 struct srp_target_port *target;
2131 
2132 target = container_of(work, struct srp_target_port, tl_err_work);
2133 if (target->rport)
2134 srp_start_tl_fail_timers(target->rport);
2135 }
2136 
2137 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2138 const char *opname)
2139 {
2140 struct srp_rdma_ch *ch = cq->cq_context;
2141 struct srp_target_port *target = ch->target;
2142 
2143 if (ch->connected && !target->qp_in_error) {
2144 shost_printk(KERN_ERR, target->scsi_host,
2145 PFX "failed %s status %s (%d) for CQE %p\n",
2146 opname, ib_wc_status_msg(wc->status), wc->status,
2147 wc->wr_cqe);
2148 queue_work(system_long_wq, &target->tl_err_work);
2149 }
2150 target->qp_in_error = true;
2151 }
2152 
2153 static int srp_queuecommand(struct Scsi_Host *shost,
2154 struct scsi_cmnd *scmnd)
2155 {
2156 struct request *rq = scsi_cmd_to_rq(scmnd);
2157 struct srp_target_port *target = host_to_target(shost);
2158 struct srp_rdma_ch *ch;
2159 struct srp_request *req = scsi_cmd_priv(scmnd);
2160 struct srp_iu *iu;
2161 struct srp_cmd *cmd;
2162 struct ib_device *dev;
2163 unsigned long flags;
2164 u32 tag;
2165 int len, ret;
2166 
2167 scmnd->result = srp_chkready(target->rport);
2168 if (unlikely(scmnd->result))
2169 goto err;
2170 
2171 WARN_ON_ONCE(rq->tag < 0);
2172 tag = blk_mq_unique_tag(rq);
2173 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2174 
2175 spin_lock_irqsave(&ch->lock, flags);
2176 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2177 spin_unlock_irqrestore(&ch->lock, flags);
2178 
2179 if (!iu)
2180 goto err;
2181 
2182 dev = target->srp_host->srp_dev->dev;
2183 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2184 DMA_TO_DEVICE);
2185 
2186 cmd = iu->buf;
2187 memset(cmd, 0, sizeof *cmd);
2188 
2189 cmd->opcode = SRP_CMD;
2190 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2191 cmd->tag = tag;
2192 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2193 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2194 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2195 4);
2196 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2197 goto err_iu;
2198 }
2199 
2200 req->scmnd = scmnd;
2201 req->cmd = iu;
2202 
2203 len = srp_map_data(scmnd, ch, req);
2204 if (len < 0) {
2205 shost_printk(KERN_ERR, target->scsi_host,
2206 PFX "Failed to map data (%d)\n", len);
2207 /*
2208 * If we ran out of memory descriptors (-ENOMEM) because an
2209 * application is queuing many requests with more than
2210 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2211 * to reduce queue depth temporarily.
2212 */
2213 scmnd->result = len == -ENOMEM ?
2214 DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2215 goto err_iu;
2216 }
2217 
2218 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2219 DMA_TO_DEVICE);
2220 
2221 if (srp_post_send(ch, iu, len)) {
2222 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2223 scmnd->result = DID_ERROR << 16;
2224 goto err_unmap;
2225 }
2226 
2227 return 0;
2228 
2229 err_unmap:
2230 srp_unmap_data(scmnd, ch, req);
2231 
2232 err_iu:
2233 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2234 
2235 /*
2236 * Prevent the loops that iterate over the request ring from
2237 * encountering a dangling SCSI command pointer.
2238 */ 2239 req->scmnd = NULL; 2240 2241 err: 2242 if (scmnd->result) { 2243 scsi_done(scmnd); 2244 ret = 0; 2245 } else { 2246 ret = SCSI_MLQUEUE_HOST_BUSY; 2247 } 2248 2249 return ret; 2250 } 2251 2252 /* 2253 * Note: the resources allocated in this function are freed in 2254 * srp_free_ch_ib(). 2255 */ 2256 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) 2257 { 2258 struct srp_target_port *target = ch->target; 2259 int i; 2260 2261 ch->rx_ring = kzalloc_objs(*ch->rx_ring, target->queue_size); 2262 if (!ch->rx_ring) 2263 goto err_no_ring; 2264 ch->tx_ring = kzalloc_objs(*ch->tx_ring, target->queue_size); 2265 if (!ch->tx_ring) 2266 goto err_no_ring; 2267 2268 for (i = 0; i < target->queue_size; ++i) { 2269 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, 2270 ch->max_ti_iu_len, 2271 GFP_KERNEL, DMA_FROM_DEVICE); 2272 if (!ch->rx_ring[i]) 2273 goto err; 2274 } 2275 2276 for (i = 0; i < target->queue_size; ++i) { 2277 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, 2278 ch->max_it_iu_len, 2279 GFP_KERNEL, DMA_TO_DEVICE); 2280 if (!ch->tx_ring[i]) 2281 goto err; 2282 2283 list_add(&ch->tx_ring[i]->list, &ch->free_tx); 2284 } 2285 2286 return 0; 2287 2288 err: 2289 for (i = 0; i < target->queue_size; ++i) { 2290 srp_free_iu(target->srp_host, ch->rx_ring[i]); 2291 srp_free_iu(target->srp_host, ch->tx_ring[i]); 2292 } 2293 2294 2295 err_no_ring: 2296 kfree(ch->tx_ring); 2297 ch->tx_ring = NULL; 2298 kfree(ch->rx_ring); 2299 ch->rx_ring = NULL; 2300 2301 return -ENOMEM; 2302 } 2303 2304 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) 2305 { 2306 uint64_t T_tr_ns, max_compl_time_ms; 2307 uint32_t rq_tmo_jiffies; 2308 2309 /* 2310 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair, 2311 * table 91), both the QP timeout and the retry count have to be set 2312 * for RC QP's during the RTR to RTS transition. 2313 */ 2314 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) != 2315 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)); 2316 2317 /* 2318 * Set target->rq_tmo_jiffies to one second more than the largest time 2319 * it can take before an error completion is generated. See also 2320 * C9-140..142 in the IBTA spec for more information about how to 2321 * convert the QP Local ACK Timeout value to nanoseconds. 2322 */ 2323 T_tr_ns = 4096 * (1ULL << qp_attr->timeout); 2324 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; 2325 do_div(max_compl_time_ms, NSEC_PER_MSEC); 2326 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000); 2327 2328 return rq_tmo_jiffies; 2329 } 2330 2331 static void srp_cm_rep_handler(struct ib_cm_id *cm_id, 2332 const struct srp_login_rsp *lrsp, 2333 struct srp_rdma_ch *ch) 2334 { 2335 struct srp_target_port *target = ch->target; 2336 struct ib_qp_attr *qp_attr = NULL; 2337 int attr_mask = 0; 2338 int ret = 0; 2339 int i; 2340 2341 if (lrsp->opcode == SRP_LOGIN_RSP) { 2342 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); 2343 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); 2344 ch->use_imm_data = srp_use_imm_data && 2345 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); 2346 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, 2347 ch->use_imm_data, 2348 target->max_it_iu_size); 2349 WARN_ON_ONCE(ch->max_it_iu_len > 2350 be32_to_cpu(lrsp->max_it_iu_len)); 2351 2352 if (ch->use_imm_data) 2353 shost_printk(KERN_DEBUG, target->scsi_host, 2354 PFX "using immediate data\n"); 2355 2356 /* 2357 * Reserve credits for task management so we don't 2358 * bounce requests back to the SCSI mid-layer. 
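 *
 * For example (illustrative value only): if the login response reports
 * req_lim = 64, can_queue is clamped below to at most
 * 64 - SRP_TSK_MGMT_SQ_SIZE.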
2359 */ 2360 target->scsi_host->can_queue 2361 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, 2362 target->scsi_host->can_queue); 2363 target->scsi_host->cmd_per_lun 2364 = min_t(int, target->scsi_host->can_queue, 2365 target->scsi_host->cmd_per_lun); 2366 } else { 2367 shost_printk(KERN_WARNING, target->scsi_host, 2368 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); 2369 ret = -ECONNRESET; 2370 goto error; 2371 } 2372 2373 if (!ch->rx_ring) { 2374 ret = srp_alloc_iu_bufs(ch); 2375 if (ret) 2376 goto error; 2377 } 2378 2379 for (i = 0; i < target->queue_size; i++) { 2380 struct srp_iu *iu = ch->rx_ring[i]; 2381 2382 ret = srp_post_recv(ch, iu); 2383 if (ret) 2384 goto error; 2385 } 2386 2387 if (!target->using_rdma_cm) { 2388 ret = -ENOMEM; 2389 qp_attr = kmalloc_obj(*qp_attr); 2390 if (!qp_attr) 2391 goto error; 2392 2393 qp_attr->qp_state = IB_QPS_RTR; 2394 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2395 if (ret) 2396 goto error_free; 2397 2398 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2399 if (ret) 2400 goto error_free; 2401 2402 qp_attr->qp_state = IB_QPS_RTS; 2403 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask); 2404 if (ret) 2405 goto error_free; 2406 2407 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); 2408 2409 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); 2410 if (ret) 2411 goto error_free; 2412 2413 ret = ib_send_cm_rtu(cm_id, NULL, 0); 2414 } 2415 2416 error_free: 2417 kfree(qp_attr); 2418 2419 error: 2420 ch->status = ret; 2421 } 2422 2423 static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id, 2424 const struct ib_cm_event *event, 2425 struct srp_rdma_ch *ch) 2426 { 2427 struct srp_target_port *target = ch->target; 2428 struct Scsi_Host *shost = target->scsi_host; 2429 struct ib_class_port_info *cpi; 2430 int opcode; 2431 u16 dlid; 2432 2433 switch (event->param.rej_rcvd.reason) { 2434 case IB_CM_REJ_PORT_CM_REDIRECT: 2435 cpi = event->param.rej_rcvd.ari; 2436 dlid = be16_to_cpu(cpi->redirect_lid); 2437 sa_path_set_dlid(&ch->ib_cm.path, dlid); 2438 ch->ib_cm.path.pkey = cpi->redirect_pkey; 2439 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; 2440 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); 2441 2442 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; 2443 break; 2444 2445 case IB_CM_REJ_PORT_REDIRECT: 2446 if (srp_target_is_topspin(target)) { 2447 union ib_gid *dgid = &ch->ib_cm.path.dgid; 2448 2449 /* 2450 * Topspin/Cisco SRP gateways incorrectly send 2451 * reject reason code 25 when they mean 24 2452 * (port redirect). 
2453 */ 2454 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); 2455 2456 shost_printk(KERN_DEBUG, shost, 2457 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", 2458 be64_to_cpu(dgid->global.subnet_prefix), 2459 be64_to_cpu(dgid->global.interface_id)); 2460 2461 ch->status = SRP_PORT_REDIRECT; 2462 } else { 2463 shost_printk(KERN_WARNING, shost, 2464 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); 2465 ch->status = -ECONNRESET; 2466 } 2467 break; 2468 2469 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 2470 shost_printk(KERN_WARNING, shost, 2471 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2472 ch->status = -ECONNRESET; 2473 break; 2474 2475 case IB_CM_REJ_CONSUMER_DEFINED: 2476 opcode = *(u8 *) event->private_data; 2477 if (opcode == SRP_LOGIN_REJ) { 2478 struct srp_login_rej *rej = event->private_data; 2479 u32 reason = be32_to_cpu(rej->reason); 2480 2481 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 2482 shost_printk(KERN_WARNING, shost, 2483 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2484 else 2485 shost_printk(KERN_WARNING, shost, PFX 2486 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 2487 target->sgid.raw, 2488 target->ib_cm.orig_dgid.raw, 2489 reason); 2490 } else 2491 shost_printk(KERN_WARNING, shost, 2492 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," 2493 " opcode 0x%02x\n", opcode); 2494 ch->status = -ECONNRESET; 2495 break; 2496 2497 case IB_CM_REJ_STALE_CONN: 2498 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); 2499 ch->status = SRP_STALE_CONN; 2500 break; 2501 2502 default: 2503 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2504 event->param.rej_rcvd.reason); 2505 ch->status = -ECONNRESET; 2506 } 2507 } 2508 2509 static int srp_ib_cm_handler(struct ib_cm_id *cm_id, 2510 const struct ib_cm_event *event) 2511 { 2512 struct srp_rdma_ch *ch = cm_id->context; 2513 struct srp_target_port *target = ch->target; 2514 int comp = 0; 2515 2516 switch (event->event) { 2517 case IB_CM_REQ_ERROR: 2518 shost_printk(KERN_DEBUG, target->scsi_host, 2519 PFX "Sending CM REQ failed\n"); 2520 comp = 1; 2521 ch->status = -ECONNRESET; 2522 break; 2523 2524 case IB_CM_REP_RECEIVED: 2525 comp = 1; 2526 srp_cm_rep_handler(cm_id, event->private_data, ch); 2527 break; 2528 2529 case IB_CM_REJ_RECEIVED: 2530 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2531 comp = 1; 2532 2533 srp_ib_cm_rej_handler(cm_id, event, ch); 2534 break; 2535 2536 case IB_CM_DREQ_RECEIVED: 2537 shost_printk(KERN_WARNING, target->scsi_host, 2538 PFX "DREQ received - connection closed\n"); 2539 ch->connected = false; 2540 if (ib_send_cm_drep(cm_id, NULL, 0)) 2541 shost_printk(KERN_ERR, target->scsi_host, 2542 PFX "Sending CM DREP failed\n"); 2543 queue_work(system_long_wq, &target->tl_err_work); 2544 break; 2545 2546 case IB_CM_TIMEWAIT_EXIT: 2547 shost_printk(KERN_ERR, target->scsi_host, 2548 PFX "connection closed\n"); 2549 comp = 1; 2550 2551 ch->status = 0; 2552 break; 2553 2554 case IB_CM_MRA_RECEIVED: 2555 case IB_CM_DREQ_ERROR: 2556 case IB_CM_DREP_RECEIVED: 2557 break; 2558 2559 default: 2560 shost_printk(KERN_WARNING, target->scsi_host, 2561 PFX "Unhandled CM event %d\n", event->event); 2562 break; 2563 } 2564 2565 if (comp) 2566 complete(&ch->done); 2567 2568 return 0; 2569 } 2570 2571 static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch, 2572 struct rdma_cm_event *event) 2573 { 2574 struct srp_target_port *target = ch->target; 2575 struct Scsi_Host *shost = target->scsi_host; 2576 int opcode; 2577 2578 switch 
(event->status) { 2579 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: 2580 shost_printk(KERN_WARNING, shost, 2581 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); 2582 ch->status = -ECONNRESET; 2583 break; 2584 2585 case IB_CM_REJ_CONSUMER_DEFINED: 2586 opcode = *(u8 *) event->param.conn.private_data; 2587 if (opcode == SRP_LOGIN_REJ) { 2588 struct srp_login_rej *rej = 2589 (struct srp_login_rej *) 2590 event->param.conn.private_data; 2591 u32 reason = be32_to_cpu(rej->reason); 2592 2593 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE) 2594 shost_printk(KERN_WARNING, shost, 2595 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 2596 else 2597 shost_printk(KERN_WARNING, shost, 2598 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); 2599 } else { 2600 shost_printk(KERN_WARNING, shost, 2601 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n", 2602 opcode); 2603 } 2604 ch->status = -ECONNRESET; 2605 break; 2606 2607 case IB_CM_REJ_STALE_CONN: 2608 shost_printk(KERN_WARNING, shost, 2609 " REJ reason: stale connection\n"); 2610 ch->status = SRP_STALE_CONN; 2611 break; 2612 2613 default: 2614 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", 2615 event->status); 2616 ch->status = -ECONNRESET; 2617 break; 2618 } 2619 } 2620 2621 static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id, 2622 struct rdma_cm_event *event) 2623 { 2624 struct srp_rdma_ch *ch = cm_id->context; 2625 struct srp_target_port *target = ch->target; 2626 int comp = 0; 2627 2628 switch (event->event) { 2629 case RDMA_CM_EVENT_ADDR_RESOLVED: 2630 ch->status = 0; 2631 comp = 1; 2632 break; 2633 2634 case RDMA_CM_EVENT_ADDR_ERROR: 2635 ch->status = -ENXIO; 2636 comp = 1; 2637 break; 2638 2639 case RDMA_CM_EVENT_ROUTE_RESOLVED: 2640 ch->status = 0; 2641 comp = 1; 2642 break; 2643 2644 case RDMA_CM_EVENT_ROUTE_ERROR: 2645 case RDMA_CM_EVENT_UNREACHABLE: 2646 ch->status = -EHOSTUNREACH; 2647 comp = 1; 2648 break; 2649 2650 case RDMA_CM_EVENT_CONNECT_ERROR: 2651 shost_printk(KERN_DEBUG, target->scsi_host, 2652 PFX "Sending CM REQ failed\n"); 2653 comp = 1; 2654 ch->status = -ECONNRESET; 2655 break; 2656 2657 case RDMA_CM_EVENT_ESTABLISHED: 2658 comp = 1; 2659 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); 2660 break; 2661 2662 case RDMA_CM_EVENT_REJECTED: 2663 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); 2664 comp = 1; 2665 2666 srp_rdma_cm_rej_handler(ch, event); 2667 break; 2668 2669 case RDMA_CM_EVENT_DISCONNECTED: 2670 if (ch->connected) { 2671 shost_printk(KERN_WARNING, target->scsi_host, 2672 PFX "received DREQ\n"); 2673 rdma_disconnect(ch->rdma_cm.cm_id); 2674 comp = 1; 2675 ch->status = 0; 2676 queue_work(system_long_wq, &target->tl_err_work); 2677 } 2678 break; 2679 2680 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 2681 shost_printk(KERN_ERR, target->scsi_host, 2682 PFX "connection closed\n"); 2683 2684 comp = 1; 2685 ch->status = 0; 2686 break; 2687 2688 default: 2689 shost_printk(KERN_WARNING, target->scsi_host, 2690 PFX "Unhandled CM event %d\n", event->event); 2691 break; 2692 } 2693 2694 if (comp) 2695 complete(&ch->done); 2696 2697 return 0; 2698 } 2699 2700 /** 2701 * srp_change_queue_depth - setting device queue depth 2702 * @sdev: scsi device struct 2703 * @qdepth: requested queue depth 2704 * 2705 * Returns queue depth. 
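 *
 * If the device does not support tagged queuing, the requested depth is
 * forced down to one before scsi_change_queue_depth() is called.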
2706 */ 2707 static int 2708 srp_change_queue_depth(struct scsi_device *sdev, int qdepth) 2709 { 2710 if (!sdev->tagged_supported) 2711 qdepth = 1; 2712 return scsi_change_queue_depth(sdev, qdepth); 2713 } 2714 2715 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, 2716 u8 func, u8 *status) 2717 { 2718 struct srp_target_port *target = ch->target; 2719 struct srp_rport *rport = target->rport; 2720 struct ib_device *dev = target->srp_host->srp_dev->dev; 2721 struct srp_iu *iu; 2722 struct srp_tsk_mgmt *tsk_mgmt; 2723 int res; 2724 2725 if (!ch->connected || target->qp_in_error) 2726 return -1; 2727 2728 /* 2729 * Lock the rport mutex to avoid that srp_create_ch_ib() is 2730 * invoked while a task management function is being sent. 2731 */ 2732 mutex_lock(&rport->mutex); 2733 spin_lock_irq(&ch->lock); 2734 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); 2735 spin_unlock_irq(&ch->lock); 2736 2737 if (!iu) { 2738 mutex_unlock(&rport->mutex); 2739 2740 return -1; 2741 } 2742 2743 iu->num_sge = 1; 2744 2745 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 2746 DMA_TO_DEVICE); 2747 tsk_mgmt = iu->buf; 2748 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 2749 2750 tsk_mgmt->opcode = SRP_TSK_MGMT; 2751 int_to_scsilun(lun, &tsk_mgmt->lun); 2752 tsk_mgmt->tsk_mgmt_func = func; 2753 tsk_mgmt->task_tag = req_tag; 2754 2755 spin_lock_irq(&ch->lock); 2756 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; 2757 tsk_mgmt->tag = ch->tsk_mgmt_tag; 2758 spin_unlock_irq(&ch->lock); 2759 2760 init_completion(&ch->tsk_mgmt_done); 2761 2762 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 2763 DMA_TO_DEVICE); 2764 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { 2765 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); 2766 mutex_unlock(&rport->mutex); 2767 2768 return -1; 2769 } 2770 res = wait_for_completion_timeout(&ch->tsk_mgmt_done, 2771 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)); 2772 if (res > 0 && status) 2773 *status = ch->tsk_mgmt_status; 2774 mutex_unlock(&rport->mutex); 2775 2776 WARN_ON_ONCE(res < 0); 2777 2778 return res > 0 ? 
0 : -1; 2779 } 2780 2781 static int srp_abort(struct scsi_cmnd *scmnd) 2782 { 2783 struct srp_target_port *target = host_to_target(scmnd->device->host); 2784 struct srp_request *req = scsi_cmd_priv(scmnd); 2785 u32 tag; 2786 u16 ch_idx; 2787 struct srp_rdma_ch *ch; 2788 2789 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2790 2791 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd)); 2792 ch_idx = blk_mq_unique_tag_to_hwq(tag); 2793 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) 2794 return SUCCESS; 2795 ch = &target->ch[ch_idx]; 2796 if (!srp_claim_req(ch, req, NULL, scmnd)) 2797 return SUCCESS; 2798 shost_printk(KERN_ERR, target->scsi_host, 2799 "Sending SRP abort for tag %#x\n", tag); 2800 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, 2801 SRP_TSK_ABORT_TASK, NULL) == 0) { 2802 srp_free_req(ch, req, scmnd, 0); 2803 return SUCCESS; 2804 } 2805 if (target->rport->state == SRP_RPORT_LOST) 2806 return FAST_IO_FAIL; 2807 2808 return FAILED; 2809 } 2810 2811 static int srp_reset_device(struct scsi_cmnd *scmnd) 2812 { 2813 struct srp_target_port *target = host_to_target(scmnd->device->host); 2814 struct srp_rdma_ch *ch; 2815 u8 status; 2816 2817 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2818 2819 ch = &target->ch[0]; 2820 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, 2821 SRP_TSK_LUN_RESET, &status)) 2822 return FAILED; 2823 if (status) 2824 return FAILED; 2825 2826 return SUCCESS; 2827 } 2828 2829 static int srp_reset_host(struct scsi_cmnd *scmnd) 2830 { 2831 struct srp_target_port *target = host_to_target(scmnd->device->host); 2832 2833 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); 2834 2835 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED; 2836 } 2837 2838 static int srp_target_alloc(struct scsi_target *starget) 2839 { 2840 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2841 struct srp_target_port *target = host_to_target(shost); 2842 2843 if (target->target_can_queue) 2844 starget->can_queue = target->target_can_queue; 2845 return 0; 2846 } 2847 2848 static int srp_sdev_configure(struct scsi_device *sdev, 2849 struct queue_limits *lim) 2850 { 2851 struct Scsi_Host *shost = sdev->host; 2852 struct srp_target_port *target = host_to_target(shost); 2853 struct request_queue *q = sdev->request_queue; 2854 unsigned long timeout; 2855 2856 if (sdev->type == TYPE_DISK) { 2857 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); 2858 blk_queue_rq_timeout(q, timeout); 2859 } 2860 2861 return 0; 2862 } 2863 2864 static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr, 2865 char *buf) 2866 { 2867 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2868 2869 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); 2870 } 2871 2872 static DEVICE_ATTR_RO(id_ext); 2873 2874 static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr, 2875 char *buf) 2876 { 2877 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2878 2879 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); 2880 } 2881 2882 static DEVICE_ATTR_RO(ioc_guid); 2883 2884 static ssize_t service_id_show(struct device *dev, 2885 struct device_attribute *attr, char *buf) 2886 { 2887 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2888 2889 if (target->using_rdma_cm) 2890 return -ENOENT; 2891 return sysfs_emit(buf, "0x%016llx\n", 2892 be64_to_cpu(target->ib_cm.service_id)); 2893 } 2894 2895 static 
DEVICE_ATTR_RO(service_id); 2896 2897 static ssize_t pkey_show(struct device *dev, struct device_attribute *attr, 2898 char *buf) 2899 { 2900 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2901 2902 if (target->using_rdma_cm) 2903 return -ENOENT; 2904 2905 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); 2906 } 2907 2908 static DEVICE_ATTR_RO(pkey); 2909 2910 static ssize_t sgid_show(struct device *dev, struct device_attribute *attr, 2911 char *buf) 2912 { 2913 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2914 2915 return sysfs_emit(buf, "%pI6\n", target->sgid.raw); 2916 } 2917 2918 static DEVICE_ATTR_RO(sgid); 2919 2920 static ssize_t dgid_show(struct device *dev, struct device_attribute *attr, 2921 char *buf) 2922 { 2923 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2924 struct srp_rdma_ch *ch = &target->ch[0]; 2925 2926 if (target->using_rdma_cm) 2927 return -ENOENT; 2928 2929 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); 2930 } 2931 2932 static DEVICE_ATTR_RO(dgid); 2933 2934 static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr, 2935 char *buf) 2936 { 2937 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2938 2939 if (target->using_rdma_cm) 2940 return -ENOENT; 2941 2942 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); 2943 } 2944 2945 static DEVICE_ATTR_RO(orig_dgid); 2946 2947 static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr, 2948 char *buf) 2949 { 2950 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2951 struct srp_rdma_ch *ch; 2952 int i, req_lim = INT_MAX; 2953 2954 for (i = 0; i < target->ch_count; i++) { 2955 ch = &target->ch[i]; 2956 req_lim = min(req_lim, ch->req_lim); 2957 } 2958 2959 return sysfs_emit(buf, "%d\n", req_lim); 2960 } 2961 2962 static DEVICE_ATTR_RO(req_lim); 2963 2964 static ssize_t zero_req_lim_show(struct device *dev, 2965 struct device_attribute *attr, char *buf) 2966 { 2967 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2968 2969 return sysfs_emit(buf, "%d\n", target->zero_req_lim); 2970 } 2971 2972 static DEVICE_ATTR_RO(zero_req_lim); 2973 2974 static ssize_t local_ib_port_show(struct device *dev, 2975 struct device_attribute *attr, char *buf) 2976 { 2977 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2978 2979 return sysfs_emit(buf, "%u\n", target->srp_host->port); 2980 } 2981 2982 static DEVICE_ATTR_RO(local_ib_port); 2983 2984 static ssize_t local_ib_device_show(struct device *dev, 2985 struct device_attribute *attr, char *buf) 2986 { 2987 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2988 2989 return sysfs_emit(buf, "%s\n", 2990 dev_name(&target->srp_host->srp_dev->dev->dev)); 2991 } 2992 2993 static DEVICE_ATTR_RO(local_ib_device); 2994 2995 static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr, 2996 char *buf) 2997 { 2998 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 2999 3000 return sysfs_emit(buf, "%d\n", target->ch_count); 3001 } 3002 3003 static DEVICE_ATTR_RO(ch_count); 3004 3005 static ssize_t comp_vector_show(struct device *dev, 3006 struct device_attribute *attr, char *buf) 3007 { 3008 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 3009 3010 return sysfs_emit(buf, "%d\n", target->comp_vector); 3011 } 3012 3013 static DEVICE_ATTR_RO(comp_vector); 3014 3015 static ssize_t 
tl_retry_count_show(struct device *dev, 3016 struct device_attribute *attr, char *buf) 3017 { 3018 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 3019 3020 return sysfs_emit(buf, "%d\n", target->tl_retry_count); 3021 } 3022 3023 static DEVICE_ATTR_RO(tl_retry_count); 3024 3025 static ssize_t cmd_sg_entries_show(struct device *dev, 3026 struct device_attribute *attr, char *buf) 3027 { 3028 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 3029 3030 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt); 3031 } 3032 3033 static DEVICE_ATTR_RO(cmd_sg_entries); 3034 3035 static ssize_t allow_ext_sg_show(struct device *dev, 3036 struct device_attribute *attr, char *buf) 3037 { 3038 struct srp_target_port *target = host_to_target(class_to_shost(dev)); 3039 3040 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); 3041 } 3042 3043 static DEVICE_ATTR_RO(allow_ext_sg); 3044 3045 static struct attribute *srp_host_attrs[] = { 3046 &dev_attr_id_ext.attr, 3047 &dev_attr_ioc_guid.attr, 3048 &dev_attr_service_id.attr, 3049 &dev_attr_pkey.attr, 3050 &dev_attr_sgid.attr, 3051 &dev_attr_dgid.attr, 3052 &dev_attr_orig_dgid.attr, 3053 &dev_attr_req_lim.attr, 3054 &dev_attr_zero_req_lim.attr, 3055 &dev_attr_local_ib_port.attr, 3056 &dev_attr_local_ib_device.attr, 3057 &dev_attr_ch_count.attr, 3058 &dev_attr_comp_vector.attr, 3059 &dev_attr_tl_retry_count.attr, 3060 &dev_attr_cmd_sg_entries.attr, 3061 &dev_attr_allow_ext_sg.attr, 3062 NULL 3063 }; 3064 3065 ATTRIBUTE_GROUPS(srp_host); 3066 3067 static const struct scsi_host_template srp_template = { 3068 .module = THIS_MODULE, 3069 .name = "InfiniBand SRP initiator", 3070 .proc_name = DRV_NAME, 3071 .target_alloc = srp_target_alloc, 3072 .sdev_configure = srp_sdev_configure, 3073 .info = srp_target_info, 3074 .init_cmd_priv = srp_init_cmd_priv, 3075 .exit_cmd_priv = srp_exit_cmd_priv, 3076 .queuecommand = srp_queuecommand, 3077 .change_queue_depth = srp_change_queue_depth, 3078 .eh_timed_out = srp_timed_out, 3079 .eh_abort_handler = srp_abort, 3080 .eh_device_reset_handler = srp_reset_device, 3081 .eh_host_reset_handler = srp_reset_host, 3082 .skip_settle_delay = true, 3083 .sg_tablesize = SRP_DEF_SG_TABLESIZE, 3084 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 3085 .this_id = -1, 3086 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 3087 .shost_groups = srp_host_groups, 3088 .track_queue_depth = 1, 3089 .cmd_size = sizeof(struct srp_request), 3090 }; 3091 3092 static int srp_sdev_count(struct Scsi_Host *host) 3093 { 3094 struct scsi_device *sdev; 3095 int c = 0; 3096 3097 shost_for_each_device(sdev, host) 3098 c++; 3099 3100 return c; 3101 } 3102 3103 /* 3104 * Return values: 3105 * < 0 upon failure. Caller is responsible for SRP target port cleanup. 3106 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port 3107 * removal has been scheduled. 3108 * 0 and target->state != SRP_TARGET_REMOVED upon success. 
3109 */ 3110 static int srp_add_target(struct srp_host *host, struct srp_target_port *target) 3111 { 3112 struct srp_rport_identifiers ids; 3113 struct srp_rport *rport; 3114 3115 target->state = SRP_TARGET_SCANNING; 3116 sprintf(target->target_name, "SRP.T10:%016llX", 3117 be64_to_cpu(target->id_ext)); 3118 3119 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) 3120 return -ENODEV; 3121 3122 memcpy(ids.port_id, &target->id_ext, 8); 3123 memcpy(ids.port_id + 8, &target->ioc_guid, 8); 3124 ids.roles = SRP_RPORT_ROLE_TARGET; 3125 rport = srp_rport_add(target->scsi_host, &ids); 3126 if (IS_ERR(rport)) { 3127 scsi_remove_host(target->scsi_host); 3128 return PTR_ERR(rport); 3129 } 3130 3131 rport->lld_data = target; 3132 target->rport = rport; 3133 3134 spin_lock(&host->target_lock); 3135 list_add_tail(&target->list, &host->target_list); 3136 spin_unlock(&host->target_lock); 3137 3138 scsi_scan_target(&target->scsi_host->shost_gendev, 3139 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 3140 3141 if (srp_connected_ch(target) < target->ch_count || 3142 target->qp_in_error) { 3143 shost_printk(KERN_INFO, target->scsi_host, 3144 PFX "SCSI scan failed - removing SCSI host\n"); 3145 srp_queue_remove_work(target); 3146 goto out; 3147 } 3148 3149 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", 3150 dev_name(&target->scsi_host->shost_gendev), 3151 srp_sdev_count(target->scsi_host)); 3152 3153 spin_lock_irq(&target->lock); 3154 if (target->state == SRP_TARGET_SCANNING) 3155 target->state = SRP_TARGET_LIVE; 3156 spin_unlock_irq(&target->lock); 3157 3158 out: 3159 return 0; 3160 } 3161 3162 static void srp_release_dev(struct device *dev) 3163 { 3164 struct srp_host *host = 3165 container_of(dev, struct srp_host, dev); 3166 3167 kfree(host); 3168 } 3169 3170 static struct attribute *srp_class_attrs[]; 3171 3172 ATTRIBUTE_GROUPS(srp_class); 3173 3174 static struct class srp_class = { 3175 .name = "infiniband_srp", 3176 .dev_groups = srp_class_groups, 3177 .dev_release = srp_release_dev 3178 }; 3179 3180 /** 3181 * srp_conn_unique() - check whether the connection to a target is unique 3182 * @host: SRP host. 3183 * @target: SRP target port. 3184 */ 3185 static bool srp_conn_unique(struct srp_host *host, 3186 struct srp_target_port *target) 3187 { 3188 struct srp_target_port *t; 3189 bool ret = false; 3190 3191 if (target->state == SRP_TARGET_REMOVED) 3192 goto out; 3193 3194 ret = true; 3195 3196 spin_lock(&host->target_lock); 3197 list_for_each_entry(t, &host->target_list, list) { 3198 if (t != target && 3199 target->id_ext == t->id_ext && 3200 target->ioc_guid == t->ioc_guid && 3201 target->initiator_ext == t->initiator_ext) { 3202 ret = false; 3203 break; 3204 } 3205 } 3206 spin_unlock(&host->target_lock); 3207 3208 out: 3209 return ret; 3210 } 3211 3212 /* 3213 * Target ports are added by writing 3214 * 3215 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>, 3216 * pkey=<P_Key>,service_id=<service ID> 3217 * or 3218 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>, 3219 * [src=<IPv4 address>,]dest=<IPv4 address>:<port number> 3220 * 3221 * to the add_target sysfs attribute. 
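 *
 * For example (all identifiers below are made up for illustration; the
 * sysfs directory name follows the "srp-<device>-<port>" pattern used by
 * srp_add_port()):
 *
 *   echo id_ext=200100e08b000000,ioc_guid=0002c903000e8acc,dgid=fe800000000000000002c903000e8acd,pkey=ffff,service_id=0002c903000e8acc > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target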
3222 */ 3223 enum { 3224 SRP_OPT_ERR = 0, 3225 SRP_OPT_ID_EXT = 1 << 0, 3226 SRP_OPT_IOC_GUID = 1 << 1, 3227 SRP_OPT_DGID = 1 << 2, 3228 SRP_OPT_PKEY = 1 << 3, 3229 SRP_OPT_SERVICE_ID = 1 << 4, 3230 SRP_OPT_MAX_SECT = 1 << 5, 3231 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6, 3232 SRP_OPT_IO_CLASS = 1 << 7, 3233 SRP_OPT_INITIATOR_EXT = 1 << 8, 3234 SRP_OPT_CMD_SG_ENTRIES = 1 << 9, 3235 SRP_OPT_ALLOW_EXT_SG = 1 << 10, 3236 SRP_OPT_SG_TABLESIZE = 1 << 11, 3237 SRP_OPT_COMP_VECTOR = 1 << 12, 3238 SRP_OPT_TL_RETRY_COUNT = 1 << 13, 3239 SRP_OPT_QUEUE_SIZE = 1 << 14, 3240 SRP_OPT_IP_SRC = 1 << 15, 3241 SRP_OPT_IP_DEST = 1 << 16, 3242 SRP_OPT_TARGET_CAN_QUEUE= 1 << 17, 3243 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18, 3244 SRP_OPT_CH_COUNT = 1 << 19, 3245 }; 3246 3247 static unsigned int srp_opt_mandatory[] = { 3248 SRP_OPT_ID_EXT | 3249 SRP_OPT_IOC_GUID | 3250 SRP_OPT_DGID | 3251 SRP_OPT_PKEY | 3252 SRP_OPT_SERVICE_ID, 3253 SRP_OPT_ID_EXT | 3254 SRP_OPT_IOC_GUID | 3255 SRP_OPT_IP_DEST, 3256 }; 3257 3258 static const match_table_t srp_opt_tokens = { 3259 { SRP_OPT_ID_EXT, "id_ext=%s" }, 3260 { SRP_OPT_IOC_GUID, "ioc_guid=%s" }, 3261 { SRP_OPT_DGID, "dgid=%s" }, 3262 { SRP_OPT_PKEY, "pkey=%x" }, 3263 { SRP_OPT_SERVICE_ID, "service_id=%s" }, 3264 { SRP_OPT_MAX_SECT, "max_sect=%d" }, 3265 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" }, 3266 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" }, 3267 { SRP_OPT_IO_CLASS, "io_class=%x" }, 3268 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" }, 3269 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" }, 3270 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" }, 3271 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" }, 3272 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" }, 3273 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" }, 3274 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" }, 3275 { SRP_OPT_IP_SRC, "src=%s" }, 3276 { SRP_OPT_IP_DEST, "dest=%s" }, 3277 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" }, 3278 { SRP_OPT_CH_COUNT, "ch_count=%u", }, 3279 { SRP_OPT_ERR, NULL } 3280 }; 3281 3282 /** 3283 * srp_parse_in - parse an IP address and port number combination 3284 * @net: [in] Network namespace. 3285 * @sa: [out] Address family, IP address and port number. 3286 * @addr_port_str: [in] IP address and port number. 3287 * @has_port: [out] Whether or not @addr_port_str includes a port number. 3288 * 3289 * Parse the following address formats: 3290 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5. 3291 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5. 
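 *
 * Return: 0 on success or a negative error code if the address could not
 * be parsed.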
3292 */ 3293 static int srp_parse_in(struct net *net, struct sockaddr_storage *sa, 3294 const char *addr_port_str, bool *has_port) 3295 { 3296 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL); 3297 char *port_str; 3298 int ret; 3299 3300 if (!addr) 3301 return -ENOMEM; 3302 port_str = strrchr(addr, ':'); 3303 if (port_str && strchr(port_str, ']')) 3304 port_str = NULL; 3305 if (port_str) 3306 *port_str++ = '\0'; 3307 if (has_port) 3308 *has_port = port_str != NULL; 3309 ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa); 3310 if (ret && addr[0]) { 3311 addr_end = addr + strlen(addr) - 1; 3312 if (addr[0] == '[' && *addr_end == ']') { 3313 *addr_end = '\0'; 3314 ret = inet_pton_with_scope(net, AF_INET6, addr + 1, 3315 port_str, sa); 3316 } 3317 } 3318 kfree(addr); 3319 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa); 3320 return ret; 3321 } 3322 3323 static int srp_parse_options(struct net *net, const char *buf, 3324 struct srp_target_port *target) 3325 { 3326 char *options, *sep_opt; 3327 char *p; 3328 substring_t args[MAX_OPT_ARGS]; 3329 unsigned long long ull; 3330 bool has_port; 3331 int opt_mask = 0; 3332 int token; 3333 int ret = -EINVAL; 3334 int i; 3335 3336 options = kstrdup(buf, GFP_KERNEL); 3337 if (!options) 3338 return -ENOMEM; 3339 3340 sep_opt = options; 3341 while ((p = strsep(&sep_opt, ",\n")) != NULL) { 3342 if (!*p) 3343 continue; 3344 3345 token = match_token(p, srp_opt_tokens, args); 3346 opt_mask |= token; 3347 3348 switch (token) { 3349 case SRP_OPT_ID_EXT: 3350 p = match_strdup(args); 3351 if (!p) { 3352 ret = -ENOMEM; 3353 goto out; 3354 } 3355 ret = kstrtoull(p, 16, &ull); 3356 if (ret) { 3357 pr_warn("invalid id_ext parameter '%s'\n", p); 3358 kfree(p); 3359 goto out; 3360 } 3361 target->id_ext = cpu_to_be64(ull); 3362 kfree(p); 3363 break; 3364 3365 case SRP_OPT_IOC_GUID: 3366 p = match_strdup(args); 3367 if (!p) { 3368 ret = -ENOMEM; 3369 goto out; 3370 } 3371 ret = kstrtoull(p, 16, &ull); 3372 if (ret) { 3373 pr_warn("invalid ioc_guid parameter '%s'\n", p); 3374 kfree(p); 3375 goto out; 3376 } 3377 target->ioc_guid = cpu_to_be64(ull); 3378 kfree(p); 3379 break; 3380 3381 case SRP_OPT_DGID: 3382 p = match_strdup(args); 3383 if (!p) { 3384 ret = -ENOMEM; 3385 goto out; 3386 } 3387 if (strlen(p) != 32) { 3388 pr_warn("bad dest GID parameter '%s'\n", p); 3389 kfree(p); 3390 goto out; 3391 } 3392 3393 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); 3394 kfree(p); 3395 if (ret < 0) 3396 goto out; 3397 break; 3398 3399 case SRP_OPT_PKEY: 3400 ret = match_hex(args, &token); 3401 if (ret) { 3402 pr_warn("bad P_Key parameter '%s'\n", p); 3403 goto out; 3404 } 3405 target->ib_cm.pkey = cpu_to_be16(token); 3406 break; 3407 3408 case SRP_OPT_SERVICE_ID: 3409 p = match_strdup(args); 3410 if (!p) { 3411 ret = -ENOMEM; 3412 goto out; 3413 } 3414 ret = kstrtoull(p, 16, &ull); 3415 if (ret) { 3416 pr_warn("bad service_id parameter '%s'\n", p); 3417 kfree(p); 3418 goto out; 3419 } 3420 target->ib_cm.service_id = cpu_to_be64(ull); 3421 kfree(p); 3422 break; 3423 3424 case SRP_OPT_IP_SRC: 3425 p = match_strdup(args); 3426 if (!p) { 3427 ret = -ENOMEM; 3428 goto out; 3429 } 3430 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, 3431 NULL); 3432 if (ret < 0) { 3433 pr_warn("bad source parameter '%s'\n", p); 3434 kfree(p); 3435 goto out; 3436 } 3437 target->rdma_cm.src_specified = true; 3438 kfree(p); 3439 break; 3440 3441 case SRP_OPT_IP_DEST: 3442 p = match_strdup(args); 3443 if (!p) { 3444 ret = -ENOMEM; 3445 goto out; 3446 } 3447 ret = srp_parse_in(net, 
&target->rdma_cm.dst.ss, p, 3448 &has_port); 3449 if (!has_port) 3450 ret = -EINVAL; 3451 if (ret < 0) { 3452 pr_warn("bad dest parameter '%s'\n", p); 3453 kfree(p); 3454 goto out; 3455 } 3456 target->using_rdma_cm = true; 3457 kfree(p); 3458 break; 3459 3460 case SRP_OPT_MAX_SECT: 3461 ret = match_int(args, &token); 3462 if (ret) { 3463 pr_warn("bad max sect parameter '%s'\n", p); 3464 goto out; 3465 } 3466 target->scsi_host->max_sectors = token; 3467 break; 3468 3469 case SRP_OPT_QUEUE_SIZE: 3470 ret = match_int(args, &token); 3471 if (ret) { 3472 pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n", 3473 p, ret); 3474 goto out; 3475 } 3476 if (token < 1) { 3477 pr_warn("bad queue_size parameter '%s'\n", p); 3478 ret = -EINVAL; 3479 goto out; 3480 } 3481 target->scsi_host->can_queue = token; 3482 target->queue_size = token + SRP_RSP_SQ_SIZE + 3483 SRP_TSK_MGMT_SQ_SIZE; 3484 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 3485 target->scsi_host->cmd_per_lun = token; 3486 break; 3487 3488 case SRP_OPT_MAX_CMD_PER_LUN: 3489 ret = match_int(args, &token); 3490 if (ret) { 3491 pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n", 3492 p, ret); 3493 goto out; 3494 } 3495 if (token < 1) { 3496 pr_warn("bad max cmd_per_lun parameter '%s'\n", 3497 p); 3498 ret = -EINVAL; 3499 goto out; 3500 } 3501 target->scsi_host->cmd_per_lun = token; 3502 break; 3503 3504 case SRP_OPT_TARGET_CAN_QUEUE: 3505 ret = match_int(args, &token); 3506 if (ret) { 3507 pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n", 3508 p, ret); 3509 goto out; 3510 } 3511 if (token < 1) { 3512 pr_warn("bad max target_can_queue parameter '%s'\n", 3513 p); 3514 ret = -EINVAL; 3515 goto out; 3516 } 3517 target->target_can_queue = token; 3518 break; 3519 3520 case SRP_OPT_IO_CLASS: 3521 ret = match_hex(args, &token); 3522 if (ret) { 3523 pr_warn("bad IO class parameter '%s'\n", p); 3524 goto out; 3525 } 3526 if (token != SRP_REV10_IB_IO_CLASS && 3527 token != SRP_REV16A_IB_IO_CLASS) { 3528 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n", 3529 token, SRP_REV10_IB_IO_CLASS, 3530 SRP_REV16A_IB_IO_CLASS); 3531 ret = -EINVAL; 3532 goto out; 3533 } 3534 target->io_class = token; 3535 break; 3536 3537 case SRP_OPT_INITIATOR_EXT: 3538 p = match_strdup(args); 3539 if (!p) { 3540 ret = -ENOMEM; 3541 goto out; 3542 } 3543 ret = kstrtoull(p, 16, &ull); 3544 if (ret) { 3545 pr_warn("bad initiator_ext value '%s'\n", p); 3546 kfree(p); 3547 goto out; 3548 } 3549 target->initiator_ext = cpu_to_be64(ull); 3550 kfree(p); 3551 break; 3552 3553 case SRP_OPT_CMD_SG_ENTRIES: 3554 ret = match_int(args, &token); 3555 if (ret) { 3556 pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n", 3557 p, ret); 3558 goto out; 3559 } 3560 if (token < 1 || token > 255) { 3561 pr_warn("bad max cmd_sg_entries parameter '%s'\n", 3562 p); 3563 ret = -EINVAL; 3564 goto out; 3565 } 3566 target->cmd_sg_cnt = token; 3567 break; 3568 3569 case SRP_OPT_ALLOW_EXT_SG: 3570 ret = match_int(args, &token); 3571 if (ret) { 3572 pr_warn("bad allow_ext_sg parameter '%s'\n", p); 3573 goto out; 3574 } 3575 target->allow_ext_sg = !!token; 3576 break; 3577 3578 case SRP_OPT_SG_TABLESIZE: 3579 ret = match_int(args, &token); 3580 if (ret) { 3581 pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n", 3582 p, ret); 3583 goto out; 3584 } 3585 if (token < 1 || token > SG_MAX_SEGMENTS) { 3586 pr_warn("bad max sg_tablesize parameter '%s'\n", 3587 p); 3588 ret = 
-EINVAL; 3589 goto out; 3590 } 3591 target->sg_tablesize = token; 3592 break; 3593 3594 case SRP_OPT_COMP_VECTOR: 3595 ret = match_int(args, &token); 3596 if (ret) { 3597 pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n", 3598 p, ret); 3599 goto out; 3600 } 3601 if (token < 0) { 3602 pr_warn("bad comp_vector parameter '%s'\n", p); 3603 ret = -EINVAL; 3604 goto out; 3605 } 3606 target->comp_vector = token; 3607 break; 3608 3609 case SRP_OPT_TL_RETRY_COUNT: 3610 ret = match_int(args, &token); 3611 if (ret) { 3612 pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n", 3613 p, ret); 3614 goto out; 3615 } 3616 if (token < 2 || token > 7) { 3617 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n", 3618 p); 3619 ret = -EINVAL; 3620 goto out; 3621 } 3622 target->tl_retry_count = token; 3623 break; 3624 3625 case SRP_OPT_MAX_IT_IU_SIZE: 3626 ret = match_int(args, &token); 3627 if (ret) { 3628 pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n", 3629 p, ret); 3630 goto out; 3631 } 3632 if (token < 0) { 3633 pr_warn("bad maximum initiator to target IU size '%s'\n", p); 3634 ret = -EINVAL; 3635 goto out; 3636 } 3637 target->max_it_iu_size = token; 3638 break; 3639 3640 case SRP_OPT_CH_COUNT: 3641 ret = match_int(args, &token); 3642 if (ret) { 3643 pr_warn("match_int() failed for channel count parameter '%s', Error %d\n", 3644 p, ret); 3645 goto out; 3646 } 3647 if (token < 1) { 3648 pr_warn("bad channel count %s\n", p); 3649 ret = -EINVAL; 3650 goto out; 3651 } 3652 target->ch_count = token; 3653 break; 3654 3655 default: 3656 pr_warn("unknown parameter or missing value '%s' in target creation request\n", 3657 p); 3658 ret = -EINVAL; 3659 goto out; 3660 } 3661 } 3662 3663 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) { 3664 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) { 3665 ret = 0; 3666 break; 3667 } 3668 } 3669 if (ret) 3670 pr_warn("target creation request is missing one or more parameters\n"); 3671 3672 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue 3673 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN)) 3674 pr_warn("cmd_per_lun = %d > queue_size = %d\n", 3675 target->scsi_host->cmd_per_lun, 3676 target->scsi_host->can_queue); 3677 3678 out: 3679 kfree(options); 3680 return ret; 3681 } 3682 3683 static ssize_t add_target_store(struct device *dev, 3684 struct device_attribute *attr, const char *buf, 3685 size_t count) 3686 { 3687 struct srp_host *host = 3688 container_of(dev, struct srp_host, dev); 3689 struct Scsi_Host *target_host; 3690 struct srp_target_port *target; 3691 struct srp_rdma_ch *ch; 3692 struct srp_device *srp_dev = host->srp_dev; 3693 struct ib_device *ibdev = srp_dev->dev; 3694 int ret, i, ch_idx; 3695 unsigned int max_sectors_per_mr, mr_per_cmd = 0; 3696 bool multich = false; 3697 uint32_t max_iu_len; 3698 3699 target_host = scsi_host_alloc(&srp_template, 3700 sizeof (struct srp_target_port)); 3701 if (!target_host) 3702 return -ENOMEM; 3703 3704 target_host->transportt = ib_srp_transport_template; 3705 target_host->max_channel = 0; 3706 target_host->max_id = 1; 3707 target_host->max_lun = -1LL; 3708 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 3709 3710 if (ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) 3711 target_host->max_segment_size = ib_dma_max_seg_size(ibdev); 3712 else 3713 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; 3714 3715 target = host_to_target(target_host); 3716 3717 target->net = 
to_net_ns(kobj_ns_grab_current(KOBJ_NS_TYPE_NET)); 3718 target->io_class = SRP_REV16A_IB_IO_CLASS; 3719 target->scsi_host = target_host; 3720 target->srp_host = host; 3721 target->lkey = host->srp_dev->pd->local_dma_lkey; 3722 target->global_rkey = host->srp_dev->global_rkey; 3723 target->cmd_sg_cnt = cmd_sg_entries; 3724 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3725 target->allow_ext_sg = allow_ext_sg; 3726 target->tl_retry_count = 7; 3727 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 3728 3729 /* 3730 * Avoid that the SCSI host can be removed by srp_remove_target() 3731 * before this function returns. 3732 */ 3733 scsi_host_get(target->scsi_host); 3734 3735 ret = mutex_lock_interruptible(&host->add_target_mutex); 3736 if (ret < 0) 3737 goto put; 3738 3739 ret = srp_parse_options(target->net, buf, target); 3740 if (ret) 3741 goto out; 3742 3743 if (!srp_conn_unique(target->srp_host, target)) { 3744 if (target->using_rdma_cm) { 3745 shost_printk(KERN_INFO, target->scsi_host, 3746 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n", 3747 be64_to_cpu(target->id_ext), 3748 be64_to_cpu(target->ioc_guid), 3749 &target->rdma_cm.dst); 3750 } else { 3751 shost_printk(KERN_INFO, target->scsi_host, 3752 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n", 3753 be64_to_cpu(target->id_ext), 3754 be64_to_cpu(target->ioc_guid), 3755 be64_to_cpu(target->initiator_ext)); 3756 } 3757 ret = -EEXIST; 3758 goto out; 3759 } 3760 3761 if (!srp_dev->has_fr && !target->allow_ext_sg && 3762 target->cmd_sg_cnt < target->sg_tablesize) { 3763 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); 3764 target->sg_tablesize = target->cmd_sg_cnt; 3765 } 3766 3767 if (srp_dev->use_fast_reg) { 3768 bool gaps_reg = ibdev->attrs.kernel_cap_flags & 3769 IBK_SG_GAPS_REG; 3770 3771 max_sectors_per_mr = srp_dev->max_pages_per_mr << 3772 (ilog2(srp_dev->mr_page_size) - 9); 3773 if (!gaps_reg) { 3774 /* 3775 * FR can only map one HCA page per entry. If the start 3776 * address is not aligned on a HCA page boundary two 3777 * entries will be used for the head and the tail 3778 * although these two entries combined contain at most 3779 * one HCA page of data. Hence the "+ 1" in the 3780 * calculation below. 3781 * 3782 * The indirect data buffer descriptor is contiguous 3783 * so the memory for that buffer will only be 3784 * registered if register_always is true. Hence add 3785 * one to mr_per_cmd if register_always has been set. 
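 *
 * As an illustration (numbers chosen arbitrarily): with
 * register_always = true, max_sectors = 1024 and
 * max_sectors_per_mr = 1024, the !gaps_reg branch below computes
 * mr_per_cmd = 1 + (1024 + 1 + 1024 - 1) / 1024 = 3.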
3786 */ 3787 mr_per_cmd = register_always + 3788 (target->scsi_host->max_sectors + 1 + 3789 max_sectors_per_mr - 1) / max_sectors_per_mr; 3790 } else { 3791 mr_per_cmd = register_always + 3792 (target->sg_tablesize + 3793 srp_dev->max_pages_per_mr - 1) / 3794 srp_dev->max_pages_per_mr; 3795 } 3796 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n", 3797 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, 3798 max_sectors_per_mr, mr_per_cmd); 3799 } 3800 3801 target_host->sg_tablesize = target->sg_tablesize; 3802 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; 3803 target->mr_per_cmd = mr_per_cmd; 3804 target->indirect_size = target->sg_tablesize * 3805 sizeof (struct srp_direct_buf); 3806 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, 3807 srp_use_imm_data, 3808 target->max_it_iu_size); 3809 3810 INIT_WORK(&target->tl_err_work, srp_tl_err_work); 3811 INIT_WORK(&target->remove_work, srp_remove_work); 3812 spin_lock_init(&target->lock); 3813 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); 3814 if (ret) 3815 goto out; 3816 3817 ret = -ENOMEM; 3818 if (target->ch_count == 0) { 3819 target->ch_count = 3820 min(ch_count ?: 3821 max(4 * num_online_nodes(), 3822 ibdev->num_comp_vectors), 3823 num_online_cpus()); 3824 } 3825 3826 target->ch = kzalloc_objs(*target->ch, target->ch_count); 3827 if (!target->ch) 3828 goto out; 3829 3830 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) { 3831 ch = &target->ch[ch_idx]; 3832 ch->target = target; 3833 ch->comp_vector = ch_idx % ibdev->num_comp_vectors; 3834 spin_lock_init(&ch->lock); 3835 INIT_LIST_HEAD(&ch->free_tx); 3836 ret = srp_new_cm_id(ch); 3837 if (ret) 3838 goto err_disconnect; 3839 3840 ret = srp_create_ch_ib(ch); 3841 if (ret) 3842 goto err_disconnect; 3843 3844 ret = srp_connect_ch(ch, max_iu_len, multich); 3845 if (ret) { 3846 char dst[64]; 3847 3848 if (target->using_rdma_cm) 3849 snprintf(dst, sizeof(dst), "%pIS", 3850 &target->rdma_cm.dst); 3851 else 3852 snprintf(dst, sizeof(dst), "%pI6", 3853 target->ib_cm.orig_dgid.raw); 3854 shost_printk(KERN_ERR, target->scsi_host, 3855 PFX "Connection %d/%d to %s failed\n", 3856 ch_idx, 3857 target->ch_count, dst); 3858 if (ch_idx == 0) { 3859 goto free_ch; 3860 } else { 3861 srp_free_ch_ib(target, ch); 3862 target->ch_count = ch - target->ch; 3863 goto connected; 3864 } 3865 } 3866 multich = true; 3867 } 3868 3869 connected: 3870 target->scsi_host->nr_hw_queues = target->ch_count; 3871 3872 ret = srp_add_target(host, target); 3873 if (ret) 3874 goto err_disconnect; 3875 3876 if (target->state != SRP_TARGET_REMOVED) { 3877 if (target->using_rdma_cm) { 3878 shost_printk(KERN_DEBUG, target->scsi_host, PFX 3879 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n", 3880 be64_to_cpu(target->id_ext), 3881 be64_to_cpu(target->ioc_guid), 3882 target->sgid.raw, &target->rdma_cm.dst); 3883 } else { 3884 shost_printk(KERN_DEBUG, target->scsi_host, PFX 3885 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 3886 be64_to_cpu(target->id_ext), 3887 be64_to_cpu(target->ioc_guid), 3888 be16_to_cpu(target->ib_cm.pkey), 3889 be64_to_cpu(target->ib_cm.service_id), 3890 target->sgid.raw, 3891 target->ib_cm.orig_dgid.raw); 3892 } 3893 } 3894 3895 ret = count; 3896 3897 out: 3898 mutex_unlock(&host->add_target_mutex); 3899 3900 put: 3901 scsi_host_put(target->scsi_host); 3902 if (ret < 0) { 3903 /* 3904 * If a call to 
srp_remove_target() has not been scheduled, 3905 * drop the network namespace reference now that was obtained 3906 * earlier in this function. 3907 */ 3908 if (target->state != SRP_TARGET_REMOVED) 3909 kobj_ns_drop(KOBJ_NS_TYPE_NET, to_ns_common(target->net)); 3910 scsi_host_put(target->scsi_host); 3911 } 3912 3913 return ret; 3914 3915 err_disconnect: 3916 srp_disconnect_target(target); 3917 3918 free_ch: 3919 for (i = 0; i < target->ch_count; i++) { 3920 ch = &target->ch[i]; 3921 srp_free_ch_ib(target, ch); 3922 } 3923 3924 kfree(target->ch); 3925 goto out; 3926 } 3927 3928 static DEVICE_ATTR_WO(add_target); 3929 3930 static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr, 3931 char *buf) 3932 { 3933 struct srp_host *host = container_of(dev, struct srp_host, dev); 3934 3935 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev)); 3936 } 3937 3938 static DEVICE_ATTR_RO(ibdev); 3939 3940 static ssize_t port_show(struct device *dev, struct device_attribute *attr, 3941 char *buf) 3942 { 3943 struct srp_host *host = container_of(dev, struct srp_host, dev); 3944 3945 return sysfs_emit(buf, "%u\n", host->port); 3946 } 3947 3948 static DEVICE_ATTR_RO(port); 3949 3950 static struct attribute *srp_class_attrs[] = { 3951 &dev_attr_add_target.attr, 3952 &dev_attr_ibdev.attr, 3953 &dev_attr_port.attr, 3954 NULL 3955 }; 3956 3957 static struct srp_host *srp_add_port(struct srp_device *device, u32 port) 3958 { 3959 struct srp_host *host; 3960 3961 host = kzalloc_obj(*host); 3962 if (!host) 3963 return NULL; 3964 3965 INIT_LIST_HEAD(&host->target_list); 3966 spin_lock_init(&host->target_lock); 3967 mutex_init(&host->add_target_mutex); 3968 host->srp_dev = device; 3969 host->port = port; 3970 3971 device_initialize(&host->dev); 3972 host->dev.class = &srp_class; 3973 host->dev.parent = device->dev->dev.parent; 3974 if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev), 3975 port)) 3976 goto put_host; 3977 if (device_add(&host->dev)) 3978 goto put_host; 3979 3980 return host; 3981 3982 put_host: 3983 put_device(&host->dev); 3984 return NULL; 3985 } 3986 3987 static void srp_rename_dev(struct ib_device *device, void *client_data) 3988 { 3989 struct srp_device *srp_dev = client_data; 3990 struct srp_host *host, *tmp_host; 3991 3992 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { 3993 char name[IB_DEVICE_NAME_MAX + 8]; 3994 3995 snprintf(name, sizeof(name), "srp-%s-%u", 3996 dev_name(&device->dev), host->port); 3997 device_rename(&host->dev, name); 3998 } 3999 } 4000 4001 static int srp_add_one(struct ib_device *device) 4002 { 4003 struct srp_device *srp_dev; 4004 struct ib_device_attr *attr = &device->attrs; 4005 struct srp_host *host; 4006 int mr_page_shift; 4007 u32 p; 4008 u64 max_pages_per_mr; 4009 unsigned int flags = 0; 4010 4011 srp_dev = kzalloc_obj(*srp_dev); 4012 if (!srp_dev) 4013 return -ENOMEM; 4014 4015 /* 4016 * Use the smallest page size supported by the HCA, down to a 4017 * minimum of 4096 bytes. We're unlikely to build large sglists 4018 * out of smaller entries. 
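 *
 * For example, a HCA whose page_size_cap has bit 12 as its least
 * significant set bit supports 4 KiB pages, so ffs(page_size_cap) - 1
 * evaluates to 12 and the code below yields mr_page_shift = 12 and
 * mr_page_size = 4096.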
static int srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
	int mr_page_shift;
	u32 p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc_obj(*srp_dev);
	if (!srp_dev)
		return -ENOMEM;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
	srp_dev->mr_page_size = 1 << mr_page_shift;
	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr = attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
		 attr->max_mr_size, srp_dev->mr_page_size,
		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);

	srp_dev->has_fr = (attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!never_register && !srp_dev->has_fr)
		dev_warn(&device->dev, "FR is not supported\n");
	else if (!never_register &&
		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
		srp_dev->use_fast_reg = srp_dev->has_fr;

	if (never_register || !register_always || !srp_dev->has_fr)
		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size = srp_dev->mr_page_size *
			       srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
		 attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd = ib_alloc_pd(device, flags);
	if (IS_ERR(srp_dev->pd)) {
		int ret = PTR_ERR(srp_dev->pd);

		kfree(srp_dev);
		return ret;
	}

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
		WARN_ON_ONCE(srp_dev->global_rkey == 0);
	}

	rdma_for_each_port (device, p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);
	return 0;
}

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		/*
		 * Remove the add_target sysfs entry so that no new target
		 * ports can be created.
		 */
		device_del(&host->dev);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * srp_queue_remove_work() queues a call to
		 * srp_remove_target(). The latter function cancels
		 * target->tl_err_work so waiting for the remove works to
		 * finish is sufficient.
		 */
		flush_workqueue(srp_remove_wq);

		put_device(&host->dev);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

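/*
 * Hooks for the SRP transport class (scsi_transport_srp): rport state
 * handling, the reconnect/fast_io_fail/dev_loss timeouts and the callbacks
 * used to reconnect an rport and to terminate outstanding I/O.
 */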
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
	BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
	BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
		pr_warn("Clamping indirect_sg_entries to %u\n",
			SG_MAX_SEGMENTS);
		indirect_sg_entries = SG_MAX_SEGMENTS;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);