/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_tcq.h>
#include <target/configfs_macros.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME	"ib_srpt"
#define DRV_VERSION	"2.0.0"
#define DRV_RELDATE	"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of the node_guid of the first HCA.");
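/*
 * Note (illustrative): because srpt_service_guid is registered via
 * module_param_call() with permission 0444, it can be read back, e.g.
 * through /sys/module/ib_srpt/parameters/srpt_service_guid, which reports
 * the value formatted by srpt_get_u64_x() as "0x%016llx".
 */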
static struct ib_client srpt_client;
static struct target_fabric_configfs *srpt_target;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);

/**
 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
 */
static inline
enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
	default:		return dir;
	}
}

/**
 * srpt_sdev_name() - Return the name associated with the HCA.
 *
 * Examples are ib0, ib1, ...
 */
static inline const char *srpt_sdev_name(struct srpt_device *sdev)
{
	return sdev->device->name;
}

static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
{
	unsigned long flags;
	enum rdma_ch_state state;

	spin_lock_irqsave(&ch->spinlock, flags);
	state = ch->state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return state;
}

static enum rdma_ch_state
srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	ch->state = new_state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev;
}

/**
 * srpt_test_and_set_ch_state() - Test and set the channel state.
 *
 * Returns true if and only if the channel state has been set to the new state.
 */
static bool
srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
			   enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == old)
		ch->state = new;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev == old;
}
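/*
 * Illustrative usage of the state helpers above: a compare-and-swap style
 * transition such as
 *
 *	if (srpt_test_and_set_ch_state(ch, CH_DRAINING, CH_RELEASING))
 *		srpt_release_channel(ch);
 *
 * (see srpt_qp_event() below) guarantees that only one context performs the
 * CH_DRAINING -> CH_RELEASING transition, since the test and the assignment
 * both happen under ch->spinlock.
 */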
/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 srpt_sdev_name(sdev));

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		printk(KERN_ERR "received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	printk(KERN_INFO "SRQ event %d\n", event->event);
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
					       CH_RELEASING))
			srpt_release_channel(ch);
		else
			pr_debug("%s: state %d - ignored LAST_WQE.\n",
				 ch->sess_name, srpt_get_ch_state(ch));
		break;
	default:
		printk(KERN_ERR "received unrecognized IB QP event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 *
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The index slot is
 * one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}
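/*
 * Worked example for srpt_set_ioc(): two four-bit slots are packed per byte,
 * odd slots in the high nibble and even slots in the low nibble. Hence
 * srpt_set_ioc(c_list, 1, 0x1) sets the high nibble of c_list[0], and
 * srpt_set_ioc(c_list, 2, 0x1) sets the low nibble of the same byte.
 */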
/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof *cif);
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = __constant_cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof *iocp);
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof *svc_entries);
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}
/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}
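/*
 * Worked example for the DM_ATTR_SVC_ENTRIES case above: the 32-bit
 * AttributeModifier packs the slot number in its upper 16 bits and the
 * highest and lowest requested service-entry indices in bits 15:8 and 7:0.
 * An attr_mod of 0x00010000 therefore selects slot 1 with hi = 0 and lo = 0,
 * i.e. only service entry 0.
 */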
/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof port_modify);
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof reg_req);
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			printk(KERN_ERR "disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}
/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}
/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}

/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);

	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->mr->lkey;

	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}
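/*
 * Note on work request identifiers: encode_wr_id() (declared in ib_srpt.h)
 * packs an srpt_opcode together with an I/O context index into the 64-bit
 * wr_id of a work request, and idx_from_wr_id() / opcode_from_wr_id() are
 * used in the completion handlers below to recover both fields.
 */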
/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		printk(KERN_WARNING "IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->mr->lkey;

	wr.next = NULL;
	wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}

/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof *db);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			printk(KERN_ERR "received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}
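/*
 * Worked example for srpt_get_desc_tbl(): an SRP_CMD whose upper buf_fmt
 * nibble equals SRP_DATA_DESC_DIRECT and whose lower nibble is zero
 * describes a write (*dir == DMA_TO_DEVICE) with a single direct descriptor;
 * with add_cdb_len == 4 that descriptor starts at offset 4 in add_data, and
 * *data_len is taken from its len field.
 */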
/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);

	kfree(ioctx->rdma_ius);
	ioctx->rdma_ius = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				opposite_dma_dir(dir));
		ioctx->mapped_sg_count = 0;
	}
}
/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct rdma_iu *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      opposite_dma_dir(dir));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_ius && ioctx->n_rdma_ius)
		nrdma = ioctx->n_rdma_ius;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
		if (!ioctx->rdma_ius)
			goto free_mem;

		ioctx->n_rdma_ius = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = sg_dma_len(&sg[0]);
	riu = ioctx->rdma_ius;

	/*
	 * For each remote descriptor, calculate the number of ib_sge entries
	 * needed. If that number is at most SRPT_DEF_SG_PER_WQE, a single
	 * rdma_iu (one RDMA work request) suffices for the descriptor;
	 * otherwise allocate extra rdma_iu structures to carry the remaining
	 * ib_sge entries in additional RDMA work requests.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->raddr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->sge_cnt = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -= dma_len;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = sg_dma_len(sg);
					}
				}
			} else {
				tsize -= rsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->sge_cnt;

			if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->sge =
				    kmalloc(riu->sge_cnt * sizeof *riu->sge,
					    GFP_KERNEL);
				if (!riu->sge)
					goto free_mem;

				++riu;
				riu->sge_cnt = 0;
				riu->raddr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
				   GFP_KERNEL);
		if (!riu->sge)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_ius;
	sg = sg_orig;
	dma_len = sg_dma_len(&sg[0]);
	dma_addr = sg_dma_address(&sg[0]);

	/* This second loop fills the mapped SG addresses into rdma_iu->sge. */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->sge;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->mr->lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = sg_dma_len(sg);
						dma_addr = sg_dma_address(sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->sge;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}
/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	kref_init(&ioctx->kref);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_ius = 0;
	ioctx->rdma_ius = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_put_send_ioctx() - Free up resources.
 */
static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
{
	struct srpt_rdma_ch *ch;
	unsigned long flags;

	BUG_ON(!ioctx);
	ch = ioctx->ch;
	BUG_ON(!ch);

	WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);

	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
	transport_generic_free_cmd(&ioctx->cmd, 0);

	if (ioctx->n_rbuf > 1) {
		kfree(ioctx->rbufs);
		ioctx->rbufs = NULL;
		ioctx->n_rbuf = 0;
	}

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}

static void srpt_put_send_ioctx_kref(struct kref *kref)
{
	srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
}
/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state. Changing
	 * the state of the command from SRPT_STATE_NEED_DATA to
	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
	 * function a second time.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (state == SRPT_STATE_DONE)
		goto out;

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
		break;
	case SRPT_STATE_NEED_DATA:
		/* DMA_TO_DEVICE (write) - RDMA read error. */
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		transport_generic_handle_data(&ioctx->cmd);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
		break;
	default:
		WARN_ON("ERROR: unexpected command state");
		break;
	}

out:
	return state;
}

/**
 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
 */
static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
{
	struct srpt_send_ioctx *ioctx;
	enum srpt_command_state state;
	struct se_cmd *cmd;
	u32 index;

	atomic_inc(&ch->sq_wr_avail);

	index = idx_from_wr_id(wr_id);
	ioctx = ch->ioctx_ring[index];
	state = srpt_get_cmd_state(ioctx);
	cmd = &ioctx->cmd;

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		&& state != SRPT_STATE_MGMT_RSP_SENT
		&& state != SRPT_STATE_NEED_DATA
		&& state != SRPT_STATE_DONE);

	/* If SRP_RSP sending failed, undo the ch->req_lim change. */
	if (state == SRPT_STATE_CMD_RSP_SENT
	    || state == SRPT_STATE_MGMT_RSP_SENT)
		atomic_dec(&ch->req_lim);

	srpt_abort_cmd(ioctx);
}
/**
 * srpt_handle_send_comp() - Process an IB send completion notification.
 */
static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	atomic_inc(&ch->sq_wr_avail);

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		    && state != SRPT_STATE_MGMT_RSP_SENT
		    && state != SRPT_STATE_DONE))
		pr_debug("state = %d\n", state);

	if (state != SRPT_STATE_DONE)
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
	else
		printk(KERN_ERR "IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
}

/**
 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
 *
 * Note: transport_generic_handle_data() is asynchronous so unmapping the
 * data that has been transferred via IB RDMA must be postponed until the
 * check_stop_free() callback.
 */
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  enum srpt_opcode opcode)
{
	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (opcode == SRPT_RDMA_READ_LAST) {
		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
						SRPT_STATE_DATA_IN))
			transport_generic_handle_data(&ioctx->cmd);
		else
			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
			       __LINE__, srpt_get_cmd_state(ioctx));
	} else if (opcode == SRPT_RDMA_ABORT) {
		ioctx->rdma_aborted = true;
	} else {
		WARN(true, "unexpected opcode %d\n", opcode);
	}
}

/**
 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
 */
static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
				      struct srpt_send_ioctx *ioctx,
				      enum srpt_opcode opcode)
{
	struct se_cmd *cmd;
	enum srpt_command_state state;
	unsigned long flags;

	cmd = &ioctx->cmd;
	state = srpt_get_cmd_state(ioctx);
	switch (opcode) {
	case SRPT_RDMA_READ_LAST:
		if (ioctx->n_rdma <= 0) {
			printk(KERN_ERR "Received invalid RDMA read"
			       " error completion with idx %d\n",
			       ioctx->ioctx.index);
			break;
		}
		atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
		if (state == SRPT_STATE_NEED_DATA)
			srpt_abort_cmd(ioctx);
		else
			printk(KERN_ERR "%s[%d]: wrong state = %d\n",
			       __func__, __LINE__, state);
		break;
	case SRPT_RDMA_WRITE_LAST:
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		break;
	default:
		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
		       __LINE__, opcode);
		break;
	}
}
/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof *srp_rsp);
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		__constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			printk(KERN_WARNING "truncated sense data from %d to %d"
			       " bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof *srp_rsp);

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
				 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
		srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
		srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
		srp_rsp->data[3] = rsp_code;
	}

	return resp_len;
}
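/*
 * Worked example for srpt_build_tskmgmt_rsp(): for any rsp_code other than
 * SRP_TSK_MGMT_SUCCESS the response carries four bytes of response data with
 * the RSP_CODE in the last byte, i.e. RSPVALID is set, resp_data_len is 4
 * and data[3] holds rsp_code; a successful response carries no response data
 * at all.
 */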
#define NO_SUCH_LUN ((uint64_t)-1LL)

/*
 * SCSI LUN addressing method. See also SAM-2 and the section about
 * eight byte LUNs.
 */
enum scsi_lun_addr_method {
	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
	SCSI_LUN_ADDR_METHOD_LUN          = 2,
	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
};

/*
 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
 *
 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
 */
static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
{
	uint64_t res = NO_SUCH_LUN;
	int addressing_method;

	if (unlikely(len < 2)) {
		printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
		       "more", len);
		goto out;
	}

	switch (len) {
	case 8:
		if ((*((__be64 *)lun) &
		     __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
			goto out_err;
		break;
	case 4:
		if (*((__be16 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 6:
		if (*((__be32 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 2:
		break;
	default:
		goto out_err;
	}

	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
	switch (addressing_method) {
	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
	case SCSI_LUN_ADDR_METHOD_FLAT:
	case SCSI_LUN_ADDR_METHOD_LUN:
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
	default:
		printk(KERN_ERR "Unimplemented LUN addressing method %u",
		       addressing_method);
		break;
	}

out:
	return res;

out_err:
	printk(KERN_ERR "Support for multi-level LUNs has not yet been"
	       " implemented");
	goto out;
}
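/*
 * Worked example for srpt_unpack_lun(): the 8-byte wire LUN
 * 00 05 00 00 00 00 00 00 uses the peripheral addressing method (top two
 * bits of byte 0 are 00) and unpacks to LUN 5, while 40 05 00 ... 00 unpacks
 * to LUN 5 in flat addressing; bytes 2..7 must be zero for all three
 * supported methods.
 */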
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
			   struct srpt_recv_ioctx *recv_ioctx,
			   struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	uint64_t unpacked_lun;
	u64 data_len;
	enum dma_data_direction dir;
	int ret;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	kref_get(&send_ioctx->kref);
	cmd = &send_ioctx->cmd;
	send_ioctx->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = MSG_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = MSG_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = MSG_ACA_TAG;
		break;
	}

	ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
	if (ret) {
		printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		goto send_sense;
	}

	cmd->data_length = data_len;
	cmd->data_direction = dir;
	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
				       sizeof(srp_cmd->lun));
	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		goto send_sense;
	}
	ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
	if (ret < 0) {
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
			srpt_queue_status(cmd);
			return 0;
		} else
			goto send_sense;
	}

	transport_handle_cdb_direct(cmd);
	return 0;

send_sense:
	transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
						 0);
	return -1;
}

/**
 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
 * @ioctx: I/O context of the SRP task management request.
 * @tag:   Tag of the SRP task management request.
 *
 * Returns zero if the target core will process the task management
 * request asynchronously.
 *
 * Note: It is assumed that the initiator serializes tag-based task management
 * requests.
 */
static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
{
	struct srpt_device *sdev;
	struct srpt_rdma_ch *ch;
	struct srpt_send_ioctx *target;
	int ret, i;

	ret = -EINVAL;
	ch = ioctx->ch;
	BUG_ON(!ch);
	BUG_ON(!ch->sport);
	sdev = ch->sport->sdev;
	BUG_ON(!sdev);
	spin_lock_irq(&sdev->spinlock);
	for (i = 0; i < ch->rq_size; ++i) {
		target = ch->ioctx_ring[i];
		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
		    target->tag == tag &&
		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
			ret = 0;
			/* now let the target core abort &target->cmd; */
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);
	return ret;
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}
/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * The request is forwarded to the target core if and only if no error is
 * detected while setting up the task management request.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	uint64_t unpacked_lun;
	int tcm_tmr;
	int res;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	if (tcm_tmr < 0) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		goto process_tmr;
	}
	res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
	if (res < 0) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto process_tmr;
	}

	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
				       sizeof(srp_tsk->lun));
	res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
	if (res) {
		pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
		goto process_tmr;
	}

	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
		srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);

process_tmr:
	kref_get(&send_ioctx->kref);
	if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
		transport_generic_handle_tmr(&send_ioctx->cmd);
	else
		transport_send_check_condition_and_sense(cmd,
						cmd->scsi_sense_reason, 0);

}
/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: SRPT receive I/O context associated with the information unit.
 * @send_ioctx: SRPT send I/O context, or NULL to allocate one on demand.
 */
static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
			       struct srpt_recv_ioctx *recv_ioctx,
			       struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;
	enum rdma_ch_state ch_state;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	ch_state = srpt_get_ch_state(ch);
	if (unlikely(ch_state == CH_CONNECTING)) {
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
		goto out;
	}

	if (unlikely(ch_state != CH_LIVE))
		goto out;

	srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
	}

	transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG,
			      send_ioctx->sense_data);

	switch (srp_cmd->opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		printk(KERN_ERR "Received SRP_RSP\n");
		break;
	default:
		printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
		       srp_cmd->opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, recv_ioctx);
out:
	return;
}

static void srpt_process_rcv_completion(struct ib_cq *cq,
					struct srpt_rdma_ch *ch,
					struct ib_wc *wc)
{
	struct srpt_device *sdev = ch->sport->sdev;
	struct srpt_recv_ioctx *ioctx;
	u32 index;

	index = idx_from_wr_id(wc->wr_id);
	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
		ioctx = sdev->ioctx_ring[index];
		srpt_handle_new_iu(ch, ioctx, NULL);
	} else {
		printk(KERN_INFO "receiving failed for idx %u with status %d\n",
		       index, wc->status);
	}
}
2025 */ 2026 static void srpt_process_send_completion(struct ib_cq *cq, 2027 struct srpt_rdma_ch *ch, 2028 struct ib_wc *wc) 2029 { 2030 struct srpt_send_ioctx *send_ioctx; 2031 uint32_t index; 2032 enum srpt_opcode opcode; 2033 2034 index = idx_from_wr_id(wc->wr_id); 2035 opcode = opcode_from_wr_id(wc->wr_id); 2036 send_ioctx = ch->ioctx_ring[index]; 2037 if (wc->status == IB_WC_SUCCESS) { 2038 if (opcode == SRPT_SEND) 2039 srpt_handle_send_comp(ch, send_ioctx); 2040 else { 2041 WARN_ON(opcode != SRPT_RDMA_ABORT && 2042 wc->opcode != IB_WC_RDMA_READ); 2043 srpt_handle_rdma_comp(ch, send_ioctx, opcode); 2044 } 2045 } else { 2046 if (opcode == SRPT_SEND) { 2047 printk(KERN_INFO "sending response for idx %u failed" 2048 " with status %d\n", index, wc->status); 2049 srpt_handle_send_err_comp(ch, wc->wr_id); 2050 } else if (opcode != SRPT_RDMA_MID) { 2051 printk(KERN_INFO "RDMA t %d for idx %u failed with" 2052 " status %d", opcode, index, wc->status); 2053 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode); 2054 } 2055 } 2056 2057 while (unlikely(opcode == SRPT_SEND 2058 && !list_empty(&ch->cmd_wait_list) 2059 && srpt_get_ch_state(ch) == CH_LIVE 2060 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) { 2061 struct srpt_recv_ioctx *recv_ioctx; 2062 2063 recv_ioctx = list_first_entry(&ch->cmd_wait_list, 2064 struct srpt_recv_ioctx, 2065 wait_list); 2066 list_del(&recv_ioctx->wait_list); 2067 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx); 2068 } 2069 } 2070 2071 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch) 2072 { 2073 struct ib_wc *const wc = ch->wc; 2074 int i, n; 2075 2076 WARN_ON(cq != ch->cq); 2077 2078 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 2079 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) { 2080 for (i = 0; i < n; i++) { 2081 if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV) 2082 srpt_process_rcv_completion(cq, ch, &wc[i]); 2083 else 2084 srpt_process_send_completion(cq, ch, &wc[i]); 2085 } 2086 } 2087 } 2088 2089 /** 2090 * srpt_completion() - IB completion queue callback function. 2091 * 2092 * Notes: 2093 * - It is guaranteed that a completion handler will never be invoked 2094 * concurrently on two different CPUs for the same completion queue. See also 2095 * Documentation/infiniband/core_locking.txt and the implementation of 2096 * handle_edge_irq() in kernel/irq/chip.c. 2097 * - When threaded IRQs are enabled, completion handlers are invoked in thread 2098 * context instead of interrupt context. 2099 */ 2100 static void srpt_completion(struct ib_cq *cq, void *ctx) 2101 { 2102 struct srpt_rdma_ch *ch = ctx; 2103 2104 wake_up_interruptible(&ch->wait_queue); 2105 } 2106 2107 static int srpt_compl_thread(void *arg) 2108 { 2109 struct srpt_rdma_ch *ch; 2110 2111 /* Hibernation / freezing of the SRPT kernel thread is not supported. */ 2112 current->flags |= PF_NOFREEZE; 2113 2114 ch = arg; 2115 BUG_ON(!ch); 2116 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n", 2117 ch->sess_name, ch->thread->comm, current->pid); 2118 while (!kthread_should_stop()) { 2119 wait_event_interruptible(ch->wait_queue, 2120 (srpt_process_completion(ch->cq, ch), 2121 kthread_should_stop())); 2122 } 2123 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n", 2124 ch->sess_name, ch->thread->comm, current->pid); 2125 return 0; 2126 } 2127 2128 /** 2129 * srpt_create_ch_ib() - Create receive and send completion queues. 
2130 */ 2131 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) 2132 { 2133 struct ib_qp_init_attr *qp_init; 2134 struct srpt_port *sport = ch->sport; 2135 struct srpt_device *sdev = sport->sdev; 2136 u32 srp_sq_size = sport->port_attrib.srp_sq_size; 2137 int ret; 2138 2139 WARN_ON(ch->rq_size < 1); 2140 2141 ret = -ENOMEM; 2142 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL); 2143 if (!qp_init) 2144 goto out; 2145 2146 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, 2147 ch->rq_size + srp_sq_size, 0); 2148 if (IS_ERR(ch->cq)) { 2149 ret = PTR_ERR(ch->cq); 2150 printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n", 2151 ch->rq_size + srp_sq_size, ret); 2152 goto out; 2153 } 2154 2155 qp_init->qp_context = (void *)ch; 2156 qp_init->event_handler 2157 = (void(*)(struct ib_event *, void*))srpt_qp_event; 2158 qp_init->send_cq = ch->cq; 2159 qp_init->recv_cq = ch->cq; 2160 qp_init->srq = sdev->srq; 2161 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; 2162 qp_init->qp_type = IB_QPT_RC; 2163 qp_init->cap.max_send_wr = srp_sq_size; 2164 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; 2165 2166 ch->qp = ib_create_qp(sdev->pd, qp_init); 2167 if (IS_ERR(ch->qp)) { 2168 ret = PTR_ERR(ch->qp); 2169 printk(KERN_ERR "failed to create_qp ret= %d\n", ret); 2170 goto err_destroy_cq; 2171 } 2172 2173 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr); 2174 2175 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", 2176 __func__, ch->cq->cqe, qp_init->cap.max_send_sge, 2177 qp_init->cap.max_send_wr, ch->cm_id); 2178 2179 ret = srpt_init_ch_qp(ch, ch->qp); 2180 if (ret) 2181 goto err_destroy_qp; 2182 2183 init_waitqueue_head(&ch->wait_queue); 2184 2185 pr_debug("creating thread for session %s\n", ch->sess_name); 2186 2187 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl"); 2188 if (IS_ERR(ch->thread)) { 2189 printk(KERN_ERR "failed to create kernel thread %ld\n", 2190 PTR_ERR(ch->thread)); 2191 ch->thread = NULL; 2192 goto err_destroy_qp; 2193 } 2194 2195 out: 2196 kfree(qp_init); 2197 return ret; 2198 2199 err_destroy_qp: 2200 ib_destroy_qp(ch->qp); 2201 err_destroy_cq: 2202 ib_destroy_cq(ch->cq); 2203 goto out; 2204 } 2205 2206 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) 2207 { 2208 if (ch->thread) 2209 kthread_stop(ch->thread); 2210 2211 ib_destroy_qp(ch->qp); 2212 ib_destroy_cq(ch->cq); 2213 } 2214 2215 /** 2216 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state. 2217 * 2218 * Reset the QP and make sure all resources associated with the channel will 2219 * be deallocated at an appropriate time. 2220 * 2221 * Note: The caller must hold ch->sport->sdev->spinlock. 
2222 */ 2223 static void __srpt_close_ch(struct srpt_rdma_ch *ch) 2224 { 2225 struct srpt_device *sdev; 2226 enum rdma_ch_state prev_state; 2227 unsigned long flags; 2228 2229 sdev = ch->sport->sdev; 2230 2231 spin_lock_irqsave(&ch->spinlock, flags); 2232 prev_state = ch->state; 2233 switch (prev_state) { 2234 case CH_CONNECTING: 2235 case CH_LIVE: 2236 ch->state = CH_DISCONNECTING; 2237 break; 2238 default: 2239 break; 2240 } 2241 spin_unlock_irqrestore(&ch->spinlock, flags); 2242 2243 switch (prev_state) { 2244 case CH_CONNECTING: 2245 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, 2246 NULL, 0); 2247 /* fall through */ 2248 case CH_LIVE: 2249 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) 2250 printk(KERN_ERR "sending CM DREQ failed.\n"); 2251 break; 2252 case CH_DISCONNECTING: 2253 break; 2254 case CH_DRAINING: 2255 case CH_RELEASING: 2256 break; 2257 } 2258 } 2259 2260 /** 2261 * srpt_close_ch() - Close an RDMA channel. 2262 */ 2263 static void srpt_close_ch(struct srpt_rdma_ch *ch) 2264 { 2265 struct srpt_device *sdev; 2266 2267 sdev = ch->sport->sdev; 2268 spin_lock_irq(&sdev->spinlock); 2269 __srpt_close_ch(ch); 2270 spin_unlock_irq(&sdev->spinlock); 2271 } 2272 2273 /** 2274 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. 2275 * @cm_id: Pointer to the CM ID of the channel to be drained. 2276 * 2277 * Note: Must be called from inside srpt_cm_handler to avoid a race between 2278 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one() 2279 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one() 2280 * waits until all target sessions for the associated IB device have been 2281 * unregistered and target session registration involves a call to 2282 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until 2283 * this function has finished). 2284 */ 2285 static void srpt_drain_channel(struct ib_cm_id *cm_id) 2286 { 2287 struct srpt_device *sdev; 2288 struct srpt_rdma_ch *ch; 2289 int ret; 2290 bool do_reset = false; 2291 2292 WARN_ON_ONCE(irqs_disabled()); 2293 2294 sdev = cm_id->context; 2295 BUG_ON(!sdev); 2296 spin_lock_irq(&sdev->spinlock); 2297 list_for_each_entry(ch, &sdev->rch_list, list) { 2298 if (ch->cm_id == cm_id) { 2299 do_reset = srpt_test_and_set_ch_state(ch, 2300 CH_CONNECTING, CH_DRAINING) || 2301 srpt_test_and_set_ch_state(ch, 2302 CH_LIVE, CH_DRAINING) || 2303 srpt_test_and_set_ch_state(ch, 2304 CH_DISCONNECTING, CH_DRAINING); 2305 break; 2306 } 2307 } 2308 spin_unlock_irq(&sdev->spinlock); 2309 2310 if (do_reset) { 2311 ret = srpt_ch_qp_err(ch); 2312 if (ret < 0) 2313 printk(KERN_ERR "Setting queue pair in error state" 2314 " failed: %d\n", ret); 2315 } 2316 } 2317 2318 /** 2319 * srpt_find_channel() - Look up an RDMA channel. 2320 * @cm_id: Pointer to the CM ID of the channel to be looked up. 2321 * 2322 * Return NULL if no matching RDMA channel has been found. 2323 */ 2324 static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev, 2325 struct ib_cm_id *cm_id) 2326 { 2327 struct srpt_rdma_ch *ch; 2328 bool found; 2329 2330 WARN_ON_ONCE(irqs_disabled()); 2331 BUG_ON(!sdev); 2332 2333 found = false; 2334 spin_lock_irq(&sdev->spinlock); 2335 list_for_each_entry(ch, &sdev->rch_list, list) { 2336 if (ch->cm_id == cm_id) { 2337 found = true; 2338 break; 2339 } 2340 } 2341 spin_unlock_irq(&sdev->spinlock); 2342 2343 return found ? ch : NULL; 2344 } 2345 2346 /** 2347 * srpt_release_channel() - Release channel resources. 
2348 * 2349 * Schedules the actual release because: 2350 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would 2351 * trigger a deadlock. 2352 * - It is not safe to call TCM transport_* functions from interrupt context. 2353 */ 2354 static void srpt_release_channel(struct srpt_rdma_ch *ch) 2355 { 2356 schedule_work(&ch->release_work); 2357 } 2358 2359 static void srpt_release_channel_work(struct work_struct *w) 2360 { 2361 struct srpt_rdma_ch *ch; 2362 struct srpt_device *sdev; 2363 2364 ch = container_of(w, struct srpt_rdma_ch, release_work); 2365 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess, 2366 ch->release_done); 2367 2368 sdev = ch->sport->sdev; 2369 BUG_ON(!sdev); 2370 2371 transport_deregister_session_configfs(ch->sess); 2372 transport_deregister_session(ch->sess); 2373 ch->sess = NULL; 2374 2375 srpt_destroy_ch_ib(ch); 2376 2377 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2378 ch->sport->sdev, ch->rq_size, 2379 ch->rsp_size, DMA_TO_DEVICE); 2380 2381 spin_lock_irq(&sdev->spinlock); 2382 list_del(&ch->list); 2383 spin_unlock_irq(&sdev->spinlock); 2384 2385 ib_destroy_cm_id(ch->cm_id); 2386 2387 if (ch->release_done) 2388 complete(ch->release_done); 2389 2390 wake_up(&sdev->ch_releaseQ); 2391 2392 kfree(ch); 2393 } 2394 2395 static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport, 2396 u8 i_port_id[16]) 2397 { 2398 struct srpt_node_acl *nacl; 2399 2400 list_for_each_entry(nacl, &sport->port_acl_list, list) 2401 if (memcmp(nacl->i_port_id, i_port_id, 2402 sizeof(nacl->i_port_id)) == 0) 2403 return nacl; 2404 2405 return NULL; 2406 } 2407 2408 static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport, 2409 u8 i_port_id[16]) 2410 { 2411 struct srpt_node_acl *nacl; 2412 2413 spin_lock_irq(&sport->port_acl_lock); 2414 nacl = __srpt_lookup_acl(sport, i_port_id); 2415 spin_unlock_irq(&sport->port_acl_lock); 2416 2417 return nacl; 2418 } 2419 2420 /** 2421 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED. 2422 * 2423 * Ownership of the cm_id is transferred to the target session if this 2424 * functions returns zero. Otherwise the caller remains the owner of cm_id. 
2425 */ 2426 static int srpt_cm_req_recv(struct ib_cm_id *cm_id, 2427 struct ib_cm_req_event_param *param, 2428 void *private_data) 2429 { 2430 struct srpt_device *sdev = cm_id->context; 2431 struct srpt_port *sport = &sdev->port[param->port - 1]; 2432 struct srp_login_req *req; 2433 struct srp_login_rsp *rsp; 2434 struct srp_login_rej *rej; 2435 struct ib_cm_rep_param *rep_param; 2436 struct srpt_rdma_ch *ch, *tmp_ch; 2437 struct srpt_node_acl *nacl; 2438 u32 it_iu_len; 2439 int i; 2440 int ret = 0; 2441 2442 WARN_ON_ONCE(irqs_disabled()); 2443 2444 if (WARN_ON(!sdev || !private_data)) 2445 return -EINVAL; 2446 2447 req = (struct srp_login_req *)private_data; 2448 2449 it_iu_len = be32_to_cpu(req->req_it_iu_len); 2450 2451 printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," 2452 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" 2453 " (guid=0x%llx:0x%llx)\n", 2454 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), 2455 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), 2456 be64_to_cpu(*(__be64 *)&req->target_port_id[0]), 2457 be64_to_cpu(*(__be64 *)&req->target_port_id[8]), 2458 it_iu_len, 2459 param->port, 2460 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), 2461 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); 2462 2463 rsp = kzalloc(sizeof *rsp, GFP_KERNEL); 2464 rej = kzalloc(sizeof *rej, GFP_KERNEL); 2465 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL); 2466 2467 if (!rsp || !rej || !rep_param) { 2468 ret = -ENOMEM; 2469 goto out; 2470 } 2471 2472 if (it_iu_len > srp_max_req_size || it_iu_len < 64) { 2473 rej->reason = __constant_cpu_to_be32( 2474 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); 2475 ret = -EINVAL; 2476 printk(KERN_ERR "rejected SRP_LOGIN_REQ because its" 2477 " length (%d bytes) is out of range (%d .. 
%d)\n", 2478 it_iu_len, 64, srp_max_req_size); 2479 goto reject; 2480 } 2481 2482 if (!sport->enabled) { 2483 rej->reason = __constant_cpu_to_be32( 2484 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2485 ret = -EINVAL; 2486 printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port" 2487 " has not yet been enabled\n"); 2488 goto reject; 2489 } 2490 2491 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) { 2492 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN; 2493 2494 spin_lock_irq(&sdev->spinlock); 2495 2496 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) { 2497 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16) 2498 && !memcmp(ch->t_port_id, req->target_port_id, 16) 2499 && param->port == ch->sport->port 2500 && param->listen_id == ch->sport->sdev->cm_id 2501 && ch->cm_id) { 2502 enum rdma_ch_state ch_state; 2503 2504 ch_state = srpt_get_ch_state(ch); 2505 if (ch_state != CH_CONNECTING 2506 && ch_state != CH_LIVE) 2507 continue; 2508 2509 /* found an existing channel */ 2510 pr_debug("Found existing channel %s" 2511 " cm_id= %p state= %d\n", 2512 ch->sess_name, ch->cm_id, ch_state); 2513 2514 __srpt_close_ch(ch); 2515 2516 rsp->rsp_flags = 2517 SRP_LOGIN_RSP_MULTICHAN_TERMINATED; 2518 } 2519 } 2520 2521 spin_unlock_irq(&sdev->spinlock); 2522 2523 } else 2524 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED; 2525 2526 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) 2527 || *(__be64 *)(req->target_port_id + 8) != 2528 cpu_to_be64(srpt_service_guid)) { 2529 rej->reason = __constant_cpu_to_be32( 2530 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); 2531 ret = -ENOMEM; 2532 printk(KERN_ERR "rejected SRP_LOGIN_REQ because it" 2533 " has an invalid target port identifier.\n"); 2534 goto reject; 2535 } 2536 2537 ch = kzalloc(sizeof *ch, GFP_KERNEL); 2538 if (!ch) { 2539 rej->reason = __constant_cpu_to_be32( 2540 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2541 printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n"); 2542 ret = -ENOMEM; 2543 goto reject; 2544 } 2545 2546 INIT_WORK(&ch->release_work, srpt_release_channel_work); 2547 memcpy(ch->i_port_id, req->initiator_port_id, 16); 2548 memcpy(ch->t_port_id, req->target_port_id, 16); 2549 ch->sport = &sdev->port[param->port - 1]; 2550 ch->cm_id = cm_id; 2551 /* 2552 * Avoid QUEUE_FULL conditions by limiting the number of buffers used 2553 * for the SRP protocol to the command queue size. 
2554 */ 2555 ch->rq_size = SRPT_RQ_SIZE; 2556 spin_lock_init(&ch->spinlock); 2557 ch->state = CH_CONNECTING; 2558 INIT_LIST_HEAD(&ch->cmd_wait_list); 2559 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size; 2560 2561 ch->ioctx_ring = (struct srpt_send_ioctx **) 2562 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, 2563 sizeof(*ch->ioctx_ring[0]), 2564 ch->rsp_size, DMA_TO_DEVICE); 2565 if (!ch->ioctx_ring) 2566 goto free_ch; 2567 2568 INIT_LIST_HEAD(&ch->free_list); 2569 for (i = 0; i < ch->rq_size; i++) { 2570 ch->ioctx_ring[i]->ch = ch; 2571 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); 2572 } 2573 2574 ret = srpt_create_ch_ib(ch); 2575 if (ret) { 2576 rej->reason = __constant_cpu_to_be32( 2577 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2578 printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating" 2579 " a new RDMA channel failed.\n"); 2580 goto free_ring; 2581 } 2582 2583 ret = srpt_ch_qp_rtr(ch, ch->qp); 2584 if (ret) { 2585 rej->reason = __constant_cpu_to_be32( 2586 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2587 printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling" 2588 " RTR failed (error code = %d)\n", ret); 2589 goto destroy_ib; 2590 } 2591 /* 2592 * Use the initator port identifier as the session name. 2593 */ 2594 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx", 2595 be64_to_cpu(*(__be64 *)ch->i_port_id), 2596 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); 2597 2598 pr_debug("registering session %s\n", ch->sess_name); 2599 2600 nacl = srpt_lookup_acl(sport, ch->i_port_id); 2601 if (!nacl) { 2602 printk(KERN_INFO "Rejected login because no ACL has been" 2603 " configured yet for initiator %s.\n", ch->sess_name); 2604 rej->reason = __constant_cpu_to_be32( 2605 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); 2606 goto destroy_ib; 2607 } 2608 2609 ch->sess = transport_init_session(); 2610 if (IS_ERR(ch->sess)) { 2611 rej->reason = __constant_cpu_to_be32( 2612 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2613 pr_debug("Failed to create session\n"); 2614 goto deregister_session; 2615 } 2616 ch->sess->se_node_acl = &nacl->nacl; 2617 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch); 2618 2619 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess, 2620 ch->sess_name, ch->cm_id); 2621 2622 /* create srp_login_response */ 2623 rsp->opcode = SRP_LOGIN_RSP; 2624 rsp->tag = req->tag; 2625 rsp->max_it_iu_len = req->req_it_iu_len; 2626 rsp->max_ti_iu_len = req->req_it_iu_len; 2627 ch->max_ti_iu_len = it_iu_len; 2628 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2629 | SRP_BUF_FORMAT_INDIRECT); 2630 rsp->req_lim_delta = cpu_to_be32(ch->rq_size); 2631 atomic_set(&ch->req_lim, ch->rq_size); 2632 atomic_set(&ch->req_lim_delta, 0); 2633 2634 /* create cm reply */ 2635 rep_param->qp_num = ch->qp->qp_num; 2636 rep_param->private_data = (void *)rsp; 2637 rep_param->private_data_len = sizeof *rsp; 2638 rep_param->rnr_retry_count = 7; 2639 rep_param->flow_control = 1; 2640 rep_param->failover_accepted = 0; 2641 rep_param->srq = 1; 2642 rep_param->responder_resources = 4; 2643 rep_param->initiator_depth = 4; 2644 2645 ret = ib_send_cm_rep(cm_id, rep_param); 2646 if (ret) { 2647 printk(KERN_ERR "sending SRP_LOGIN_REQ response failed" 2648 " (error code = %d)\n", ret); 2649 goto release_channel; 2650 } 2651 2652 spin_lock_irq(&sdev->spinlock); 2653 list_add_tail(&ch->list, &sdev->rch_list); 2654 spin_unlock_irq(&sdev->spinlock); 2655 2656 goto out; 2657 2658 release_channel: 2659 srpt_set_ch_state(ch, CH_RELEASING); 2660 
transport_deregister_session_configfs(ch->sess); 2661 2662 deregister_session: 2663 transport_deregister_session(ch->sess); 2664 ch->sess = NULL; 2665 2666 destroy_ib: 2667 srpt_destroy_ch_ib(ch); 2668 2669 free_ring: 2670 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2671 ch->sport->sdev, ch->rq_size, 2672 ch->rsp_size, DMA_TO_DEVICE); 2673 free_ch: 2674 kfree(ch); 2675 2676 reject: 2677 rej->opcode = SRP_LOGIN_REJ; 2678 rej->tag = req->tag; 2679 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2680 | SRP_BUF_FORMAT_INDIRECT); 2681 2682 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2683 (void *)rej, sizeof *rej); 2684 2685 out: 2686 kfree(rep_param); 2687 kfree(rsp); 2688 kfree(rej); 2689 2690 return ret; 2691 } 2692 2693 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id) 2694 { 2695 printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id); 2696 srpt_drain_channel(cm_id); 2697 } 2698 2699 /** 2700 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event. 2701 * 2702 * An IB_CM_RTU_RECEIVED message indicates that the connection is established 2703 * and that the recipient may begin transmitting (RTU = ready to use). 2704 */ 2705 static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id) 2706 { 2707 struct srpt_rdma_ch *ch; 2708 int ret; 2709 2710 ch = srpt_find_channel(cm_id->context, cm_id); 2711 BUG_ON(!ch); 2712 2713 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) { 2714 struct srpt_recv_ioctx *ioctx, *ioctx_tmp; 2715 2716 ret = srpt_ch_qp_rts(ch, ch->qp); 2717 2718 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list, 2719 wait_list) { 2720 list_del(&ioctx->wait_list); 2721 srpt_handle_new_iu(ch, ioctx, NULL); 2722 } 2723 if (ret) 2724 srpt_close_ch(ch); 2725 } 2726 } 2727 2728 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id) 2729 { 2730 printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id); 2731 srpt_drain_channel(cm_id); 2732 } 2733 2734 static void srpt_cm_rep_error(struct ib_cm_id *cm_id) 2735 { 2736 printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id); 2737 srpt_drain_channel(cm_id); 2738 } 2739 2740 /** 2741 * srpt_cm_dreq_recv() - Process reception of a DREQ message. 2742 */ 2743 static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id) 2744 { 2745 struct srpt_rdma_ch *ch; 2746 unsigned long flags; 2747 bool send_drep = false; 2748 2749 ch = srpt_find_channel(cm_id->context, cm_id); 2750 BUG_ON(!ch); 2751 2752 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch)); 2753 2754 spin_lock_irqsave(&ch->spinlock, flags); 2755 switch (ch->state) { 2756 case CH_CONNECTING: 2757 case CH_LIVE: 2758 send_drep = true; 2759 ch->state = CH_DISCONNECTING; 2760 break; 2761 case CH_DISCONNECTING: 2762 case CH_DRAINING: 2763 case CH_RELEASING: 2764 WARN(true, "unexpected channel state %d\n", ch->state); 2765 break; 2766 } 2767 spin_unlock_irqrestore(&ch->spinlock, flags); 2768 2769 if (send_drep) { 2770 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0) 2771 printk(KERN_ERR "Sending IB DREP failed.\n"); 2772 printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n", 2773 ch->sess_name); 2774 } 2775 } 2776 2777 /** 2778 * srpt_cm_drep_recv() - Process reception of a DREP message. 2779 */ 2780 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id) 2781 { 2782 printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n", 2783 cm_id); 2784 srpt_drain_channel(cm_id); 2785 } 2786 2787 /** 2788 * srpt_cm_handler() - IB connection manager callback function. 
2789 * 2790 * A non-zero return value will cause the caller destroy the CM ID. 2791 * 2792 * Note: srpt_cm_handler() must only return a non-zero value when transferring 2793 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning 2794 * a non-zero value in any other case will trigger a race with the 2795 * ib_destroy_cm_id() call in srpt_release_channel(). 2796 */ 2797 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2798 { 2799 int ret; 2800 2801 ret = 0; 2802 switch (event->event) { 2803 case IB_CM_REQ_RECEIVED: 2804 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd, 2805 event->private_data); 2806 break; 2807 case IB_CM_REJ_RECEIVED: 2808 srpt_cm_rej_recv(cm_id); 2809 break; 2810 case IB_CM_RTU_RECEIVED: 2811 case IB_CM_USER_ESTABLISHED: 2812 srpt_cm_rtu_recv(cm_id); 2813 break; 2814 case IB_CM_DREQ_RECEIVED: 2815 srpt_cm_dreq_recv(cm_id); 2816 break; 2817 case IB_CM_DREP_RECEIVED: 2818 srpt_cm_drep_recv(cm_id); 2819 break; 2820 case IB_CM_TIMEWAIT_EXIT: 2821 srpt_cm_timewait_exit(cm_id); 2822 break; 2823 case IB_CM_REP_ERROR: 2824 srpt_cm_rep_error(cm_id); 2825 break; 2826 case IB_CM_DREQ_ERROR: 2827 printk(KERN_INFO "Received IB DREQ ERROR event.\n"); 2828 break; 2829 case IB_CM_MRA_RECEIVED: 2830 printk(KERN_INFO "Received IB MRA event\n"); 2831 break; 2832 default: 2833 printk(KERN_ERR "received unrecognized IB CM event %d\n", 2834 event->event); 2835 break; 2836 } 2837 2838 return ret; 2839 } 2840 2841 /** 2842 * srpt_perform_rdmas() - Perform IB RDMA. 2843 * 2844 * Returns zero upon success or a negative number upon failure. 2845 */ 2846 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, 2847 struct srpt_send_ioctx *ioctx) 2848 { 2849 struct ib_send_wr wr; 2850 struct ib_send_wr *bad_wr; 2851 struct rdma_iu *riu; 2852 int i; 2853 int ret; 2854 int sq_wr_avail; 2855 enum dma_data_direction dir; 2856 const int n_rdma = ioctx->n_rdma; 2857 2858 dir = ioctx->cmd.data_direction; 2859 if (dir == DMA_TO_DEVICE) { 2860 /* write */ 2861 ret = -ENOMEM; 2862 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); 2863 if (sq_wr_avail < 0) { 2864 printk(KERN_WARNING "IB send queue full (needed %d)\n", 2865 n_rdma); 2866 goto out; 2867 } 2868 } 2869 2870 ioctx->rdma_aborted = false; 2871 ret = 0; 2872 riu = ioctx->rdma_ius; 2873 memset(&wr, 0, sizeof wr); 2874 2875 for (i = 0; i < n_rdma; ++i, ++riu) { 2876 if (dir == DMA_FROM_DEVICE) { 2877 wr.opcode = IB_WR_RDMA_WRITE; 2878 wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 2879 SRPT_RDMA_WRITE_LAST : 2880 SRPT_RDMA_MID, 2881 ioctx->ioctx.index); 2882 } else { 2883 wr.opcode = IB_WR_RDMA_READ; 2884 wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 
2885 SRPT_RDMA_READ_LAST : 2886 SRPT_RDMA_MID, 2887 ioctx->ioctx.index); 2888 } 2889 wr.next = NULL; 2890 wr.wr.rdma.remote_addr = riu->raddr; 2891 wr.wr.rdma.rkey = riu->rkey; 2892 wr.num_sge = riu->sge_cnt; 2893 wr.sg_list = riu->sge; 2894 2895 /* only get completion event for the last rdma write */ 2896 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) 2897 wr.send_flags = IB_SEND_SIGNALED; 2898 2899 ret = ib_post_send(ch->qp, &wr, &bad_wr); 2900 if (ret) 2901 break; 2902 } 2903 2904 if (ret) 2905 printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d", 2906 __func__, __LINE__, ret, i, n_rdma); 2907 if (ret && i > 0) { 2908 wr.num_sge = 0; 2909 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); 2910 wr.send_flags = IB_SEND_SIGNALED; 2911 while (ch->state == CH_LIVE && 2912 ib_post_send(ch->qp, &wr, &bad_wr) != 0) { 2913 printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]", 2914 ioctx->ioctx.index); 2915 msleep(1000); 2916 } 2917 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { 2918 printk(KERN_INFO "Waiting until RDMA abort finished [%d]", 2919 ioctx->ioctx.index); 2920 msleep(1000); 2921 } 2922 } 2923 out: 2924 if (unlikely(dir == DMA_TO_DEVICE && ret < 0)) 2925 atomic_add(n_rdma, &ch->sq_wr_avail); 2926 return ret; 2927 } 2928 2929 /** 2930 * srpt_xfer_data() - Start data transfer from initiator to target. 2931 */ 2932 static int srpt_xfer_data(struct srpt_rdma_ch *ch, 2933 struct srpt_send_ioctx *ioctx) 2934 { 2935 int ret; 2936 2937 ret = srpt_map_sg_to_ib_sge(ch, ioctx); 2938 if (ret) { 2939 printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret); 2940 goto out; 2941 } 2942 2943 ret = srpt_perform_rdmas(ch, ioctx); 2944 if (ret) { 2945 if (ret == -EAGAIN || ret == -ENOMEM) 2946 printk(KERN_INFO "%s[%d] queue full -- ret=%d\n", 2947 __func__, __LINE__, ret); 2948 else 2949 printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n", 2950 __func__, __LINE__, ret); 2951 goto out_unmap; 2952 } 2953 2954 out: 2955 return ret; 2956 out_unmap: 2957 srpt_unmap_sg_to_ib_sge(ch, ioctx); 2958 goto out; 2959 } 2960 2961 static int srpt_write_pending_status(struct se_cmd *se_cmd) 2962 { 2963 struct srpt_send_ioctx *ioctx; 2964 2965 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 2966 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA; 2967 } 2968 2969 /* 2970 * srpt_write_pending() - Start data transfer from initiator to target (write). 
2971 */ 2972 static int srpt_write_pending(struct se_cmd *se_cmd) 2973 { 2974 struct srpt_rdma_ch *ch; 2975 struct srpt_send_ioctx *ioctx; 2976 enum srpt_command_state new_state; 2977 enum rdma_ch_state ch_state; 2978 int ret; 2979 2980 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 2981 2982 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); 2983 WARN_ON(new_state == SRPT_STATE_DONE); 2984 2985 ch = ioctx->ch; 2986 BUG_ON(!ch); 2987 2988 ch_state = srpt_get_ch_state(ch); 2989 switch (ch_state) { 2990 case CH_CONNECTING: 2991 WARN(true, "unexpected channel state %d\n", ch_state); 2992 ret = -EINVAL; 2993 goto out; 2994 case CH_LIVE: 2995 break; 2996 case CH_DISCONNECTING: 2997 case CH_DRAINING: 2998 case CH_RELEASING: 2999 pr_debug("cmd with tag %lld: channel disconnecting\n", 3000 ioctx->tag); 3001 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); 3002 ret = -EINVAL; 3003 goto out; 3004 } 3005 ret = srpt_xfer_data(ch, ioctx); 3006 3007 out: 3008 return ret; 3009 } 3010 3011 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) 3012 { 3013 switch (tcm_mgmt_status) { 3014 case TMR_FUNCTION_COMPLETE: 3015 return SRP_TSK_MGMT_SUCCESS; 3016 case TMR_FUNCTION_REJECTED: 3017 return SRP_TSK_MGMT_FUNC_NOT_SUPP; 3018 } 3019 return SRP_TSK_MGMT_FAILED; 3020 } 3021 3022 /** 3023 * srpt_queue_response() - Transmits the response to a SCSI command. 3024 * 3025 * Callback function called by the TCM core. Must not block since it can be 3026 * invoked on the context of the IB completion handler. 3027 */ 3028 static int srpt_queue_response(struct se_cmd *cmd) 3029 { 3030 struct srpt_rdma_ch *ch; 3031 struct srpt_send_ioctx *ioctx; 3032 enum srpt_command_state state; 3033 unsigned long flags; 3034 int ret; 3035 enum dma_data_direction dir; 3036 int resp_len; 3037 u8 srp_tm_status; 3038 3039 ret = 0; 3040 3041 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); 3042 ch = ioctx->ch; 3043 BUG_ON(!ch); 3044 3045 spin_lock_irqsave(&ioctx->spinlock, flags); 3046 state = ioctx->state; 3047 switch (state) { 3048 case SRPT_STATE_NEW: 3049 case SRPT_STATE_DATA_IN: 3050 ioctx->state = SRPT_STATE_CMD_RSP_SENT; 3051 break; 3052 case SRPT_STATE_MGMT: 3053 ioctx->state = SRPT_STATE_MGMT_RSP_SENT; 3054 break; 3055 default: 3056 WARN(true, "ch %p; cmd %d: unexpected command state %d\n", 3057 ch, ioctx->ioctx.index, ioctx->state); 3058 break; 3059 } 3060 spin_unlock_irqrestore(&ioctx->spinlock, flags); 3061 3062 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) 3063 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { 3064 atomic_inc(&ch->req_lim_delta); 3065 srpt_abort_cmd(ioctx); 3066 goto out; 3067 } 3068 3069 dir = ioctx->cmd.data_direction; 3070 3071 /* For read commands, transfer the data to the initiator. 
*/ 3072 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length && 3073 !ioctx->queue_status_only) { 3074 ret = srpt_xfer_data(ch, ioctx); 3075 if (ret) { 3076 printk(KERN_ERR "xfer_data failed for tag %llu\n", 3077 ioctx->tag); 3078 goto out; 3079 } 3080 } 3081 3082 if (state != SRPT_STATE_MGMT) 3083 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, 3084 cmd->scsi_status); 3085 else { 3086 srp_tm_status 3087 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); 3088 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, 3089 ioctx->tag); 3090 } 3091 ret = srpt_post_send(ch, ioctx, resp_len); 3092 if (ret) { 3093 printk(KERN_ERR "sending cmd response failed for tag %llu\n", 3094 ioctx->tag); 3095 srpt_unmap_sg_to_ib_sge(ch, ioctx); 3096 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); 3097 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); 3098 } 3099 3100 out: 3101 return ret; 3102 } 3103 3104 static int srpt_queue_status(struct se_cmd *cmd) 3105 { 3106 struct srpt_send_ioctx *ioctx; 3107 3108 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); 3109 BUG_ON(ioctx->sense_data != cmd->sense_buffer); 3110 if (cmd->se_cmd_flags & 3111 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE)) 3112 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION); 3113 ioctx->queue_status_only = true; 3114 return srpt_queue_response(cmd); 3115 } 3116 3117 static void srpt_refresh_port_work(struct work_struct *work) 3118 { 3119 struct srpt_port *sport = container_of(work, struct srpt_port, work); 3120 3121 srpt_refresh_port(sport); 3122 } 3123 3124 static int srpt_ch_list_empty(struct srpt_device *sdev) 3125 { 3126 int res; 3127 3128 spin_lock_irq(&sdev->spinlock); 3129 res = list_empty(&sdev->rch_list); 3130 spin_unlock_irq(&sdev->spinlock); 3131 3132 return res; 3133 } 3134 3135 /** 3136 * srpt_release_sdev() - Free the channel resources associated with a target. 3137 */ 3138 static int srpt_release_sdev(struct srpt_device *sdev) 3139 { 3140 struct srpt_rdma_ch *ch, *tmp_ch; 3141 int res; 3142 3143 WARN_ON_ONCE(irqs_disabled()); 3144 3145 BUG_ON(!sdev); 3146 3147 spin_lock_irq(&sdev->spinlock); 3148 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) 3149 __srpt_close_ch(ch); 3150 spin_unlock_irq(&sdev->spinlock); 3151 3152 res = wait_event_interruptible(sdev->ch_releaseQ, 3153 srpt_ch_list_empty(sdev)); 3154 if (res) 3155 printk(KERN_ERR "%s: interrupted.\n", __func__); 3156 3157 return 0; 3158 } 3159 3160 static struct srpt_port *__srpt_lookup_port(const char *name) 3161 { 3162 struct ib_device *dev; 3163 struct srpt_device *sdev; 3164 struct srpt_port *sport; 3165 int i; 3166 3167 list_for_each_entry(sdev, &srpt_dev_list, list) { 3168 dev = sdev->device; 3169 if (!dev) 3170 continue; 3171 3172 for (i = 0; i < dev->phys_port_cnt; i++) { 3173 sport = &sdev->port[i]; 3174 3175 if (!strcmp(sport->port_guid, name)) 3176 return sport; 3177 } 3178 } 3179 3180 return NULL; 3181 } 3182 3183 static struct srpt_port *srpt_lookup_port(const char *name) 3184 { 3185 struct srpt_port *sport; 3186 3187 spin_lock(&srpt_dev_lock); 3188 sport = __srpt_lookup_port(name); 3189 spin_unlock(&srpt_dev_lock); 3190 3191 return sport; 3192 } 3193 3194 /** 3195 * srpt_add_one() - Infiniband device addition callback function. 
3196 */ 3197 static void srpt_add_one(struct ib_device *device) 3198 { 3199 struct srpt_device *sdev; 3200 struct srpt_port *sport; 3201 struct ib_srq_init_attr srq_attr; 3202 int i; 3203 3204 pr_debug("device = %p, device->dma_ops = %p\n", device, 3205 device->dma_ops); 3206 3207 sdev = kzalloc(sizeof *sdev, GFP_KERNEL); 3208 if (!sdev) 3209 goto err; 3210 3211 sdev->device = device; 3212 INIT_LIST_HEAD(&sdev->rch_list); 3213 init_waitqueue_head(&sdev->ch_releaseQ); 3214 spin_lock_init(&sdev->spinlock); 3215 3216 if (ib_query_device(device, &sdev->dev_attr)) 3217 goto free_dev; 3218 3219 sdev->pd = ib_alloc_pd(device); 3220 if (IS_ERR(sdev->pd)) 3221 goto free_dev; 3222 3223 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE); 3224 if (IS_ERR(sdev->mr)) 3225 goto err_pd; 3226 3227 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr); 3228 3229 srq_attr.event_handler = srpt_srq_event; 3230 srq_attr.srq_context = (void *)sdev; 3231 srq_attr.attr.max_wr = sdev->srq_size; 3232 srq_attr.attr.max_sge = 1; 3233 srq_attr.attr.srq_limit = 0; 3234 srq_attr.srq_type = IB_SRQT_BASIC; 3235 3236 sdev->srq = ib_create_srq(sdev->pd, &srq_attr); 3237 if (IS_ERR(sdev->srq)) 3238 goto err_mr; 3239 3240 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n", 3241 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr, 3242 device->name); 3243 3244 if (!srpt_service_guid) 3245 srpt_service_guid = be64_to_cpu(device->node_guid); 3246 3247 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev); 3248 if (IS_ERR(sdev->cm_id)) 3249 goto err_srq; 3250 3251 /* print out target login information */ 3252 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx," 3253 "pkey=ffff,service_id=%016llx\n", srpt_service_guid, 3254 srpt_service_guid, srpt_service_guid); 3255 3256 /* 3257 * We do not have a consistent service_id (ie. also id_ext of target_id) 3258 * to identify this target. 
We currently use the guid of the first HCA 3259 * in the system as service_id; therefore, the target_id will change 3260 * if this HCA is gone bad and replaced by different HCA 3261 */ 3262 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL)) 3263 goto err_cm; 3264 3265 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, 3266 srpt_event_handler); 3267 if (ib_register_event_handler(&sdev->event_handler)) 3268 goto err_cm; 3269 3270 sdev->ioctx_ring = (struct srpt_recv_ioctx **) 3271 srpt_alloc_ioctx_ring(sdev, sdev->srq_size, 3272 sizeof(*sdev->ioctx_ring[0]), 3273 srp_max_req_size, DMA_FROM_DEVICE); 3274 if (!sdev->ioctx_ring) 3275 goto err_event; 3276 3277 for (i = 0; i < sdev->srq_size; ++i) 3278 srpt_post_recv(sdev, sdev->ioctx_ring[i]); 3279 3280 WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port)); 3281 3282 for (i = 1; i <= sdev->device->phys_port_cnt; i++) { 3283 sport = &sdev->port[i - 1]; 3284 sport->sdev = sdev; 3285 sport->port = i; 3286 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE; 3287 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE; 3288 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE; 3289 INIT_WORK(&sport->work, srpt_refresh_port_work); 3290 INIT_LIST_HEAD(&sport->port_acl_list); 3291 spin_lock_init(&sport->port_acl_lock); 3292 3293 if (srpt_refresh_port(sport)) { 3294 printk(KERN_ERR "MAD registration failed for %s-%d.\n", 3295 srpt_sdev_name(sdev), i); 3296 goto err_ring; 3297 } 3298 snprintf(sport->port_guid, sizeof(sport->port_guid), 3299 "0x%016llx%016llx", 3300 be64_to_cpu(sport->gid.global.subnet_prefix), 3301 be64_to_cpu(sport->gid.global.interface_id)); 3302 } 3303 3304 spin_lock(&srpt_dev_lock); 3305 list_add_tail(&sdev->list, &srpt_dev_list); 3306 spin_unlock(&srpt_dev_lock); 3307 3308 out: 3309 ib_set_client_data(device, &srpt_client, sdev); 3310 pr_debug("added %s.\n", device->name); 3311 return; 3312 3313 err_ring: 3314 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, 3315 sdev->srq_size, srp_max_req_size, 3316 DMA_FROM_DEVICE); 3317 err_event: 3318 ib_unregister_event_handler(&sdev->event_handler); 3319 err_cm: 3320 ib_destroy_cm_id(sdev->cm_id); 3321 err_srq: 3322 ib_destroy_srq(sdev->srq); 3323 err_mr: 3324 ib_dereg_mr(sdev->mr); 3325 err_pd: 3326 ib_dealloc_pd(sdev->pd); 3327 free_dev: 3328 kfree(sdev); 3329 err: 3330 sdev = NULL; 3331 printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name); 3332 goto out; 3333 } 3334 3335 /** 3336 * srpt_remove_one() - InfiniBand device removal callback function. 3337 */ 3338 static void srpt_remove_one(struct ib_device *device) 3339 { 3340 struct srpt_device *sdev; 3341 int i; 3342 3343 sdev = ib_get_client_data(device, &srpt_client); 3344 if (!sdev) { 3345 printk(KERN_INFO "%s(%s): nothing to do.\n", __func__, 3346 device->name); 3347 return; 3348 } 3349 3350 srpt_unregister_mad_agent(sdev); 3351 3352 ib_unregister_event_handler(&sdev->event_handler); 3353 3354 /* Cancel any work queued by the just unregistered IB event handler. */ 3355 for (i = 0; i < sdev->device->phys_port_cnt; i++) 3356 cancel_work_sync(&sdev->port[i].work); 3357 3358 ib_destroy_cm_id(sdev->cm_id); 3359 3360 /* 3361 * Unregistering a target must happen after destroying sdev->cm_id 3362 * such that no new SRP_LOGIN_REQ information units can arrive while 3363 * destroying the target. 
3364 */ 3365 spin_lock(&srpt_dev_lock); 3366 list_del(&sdev->list); 3367 spin_unlock(&srpt_dev_lock); 3368 srpt_release_sdev(sdev); 3369 3370 ib_destroy_srq(sdev->srq); 3371 ib_dereg_mr(sdev->mr); 3372 ib_dealloc_pd(sdev->pd); 3373 3374 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, 3375 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE); 3376 sdev->ioctx_ring = NULL; 3377 kfree(sdev); 3378 } 3379 3380 static struct ib_client srpt_client = { 3381 .name = DRV_NAME, 3382 .add = srpt_add_one, 3383 .remove = srpt_remove_one 3384 }; 3385 3386 static int srpt_check_true(struct se_portal_group *se_tpg) 3387 { 3388 return 1; 3389 } 3390 3391 static int srpt_check_false(struct se_portal_group *se_tpg) 3392 { 3393 return 0; 3394 } 3395 3396 static char *srpt_get_fabric_name(void) 3397 { 3398 return "srpt"; 3399 } 3400 3401 static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg) 3402 { 3403 return SCSI_TRANSPORTID_PROTOCOLID_SRP; 3404 } 3405 3406 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) 3407 { 3408 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); 3409 3410 return sport->port_guid; 3411 } 3412 3413 static u16 srpt_get_tag(struct se_portal_group *tpg) 3414 { 3415 return 1; 3416 } 3417 3418 static u32 srpt_get_default_depth(struct se_portal_group *se_tpg) 3419 { 3420 return 1; 3421 } 3422 3423 static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg, 3424 struct se_node_acl *se_nacl, 3425 struct t10_pr_registration *pr_reg, 3426 int *format_code, unsigned char *buf) 3427 { 3428 struct srpt_node_acl *nacl; 3429 struct spc_rdma_transport_id *tr_id; 3430 3431 nacl = container_of(se_nacl, struct srpt_node_acl, nacl); 3432 tr_id = (void *)buf; 3433 tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP; 3434 memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id)); 3435 return sizeof(*tr_id); 3436 } 3437 3438 static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg, 3439 struct se_node_acl *se_nacl, 3440 struct t10_pr_registration *pr_reg, 3441 int *format_code) 3442 { 3443 *format_code = 0; 3444 return sizeof(struct spc_rdma_transport_id); 3445 } 3446 3447 static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg, 3448 const char *buf, u32 *out_tid_len, 3449 char **port_nexus_ptr) 3450 { 3451 struct spc_rdma_transport_id *tr_id; 3452 3453 *port_nexus_ptr = NULL; 3454 *out_tid_len = sizeof(struct spc_rdma_transport_id); 3455 tr_id = (void *)buf; 3456 return (char *)tr_id->i_port_id; 3457 } 3458 3459 static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) 3460 { 3461 struct srpt_node_acl *nacl; 3462 3463 nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); 3464 if (!nacl) { 3465 printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n"); 3466 return NULL; 3467 } 3468 3469 return &nacl->nacl; 3470 } 3471 3472 static void srpt_release_fabric_acl(struct se_portal_group *se_tpg, 3473 struct se_node_acl *se_nacl) 3474 { 3475 struct srpt_node_acl *nacl; 3476 3477 nacl = container_of(se_nacl, struct srpt_node_acl, nacl); 3478 kfree(nacl); 3479 } 3480 3481 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) 3482 { 3483 return 1; 3484 } 3485 3486 static void srpt_release_cmd(struct se_cmd *se_cmd) 3487 { 3488 } 3489 3490 /** 3491 * srpt_shutdown_session() - Whether or not a session may be shut down. 
3492 */ 3493 static int srpt_shutdown_session(struct se_session *se_sess) 3494 { 3495 return true; 3496 } 3497 3498 /** 3499 * srpt_close_session() - Forcibly close a session. 3500 * 3501 * Callback function invoked by the TCM core to clean up sessions associated 3502 * with a node ACL when the user invokes 3503 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3504 */ 3505 static void srpt_close_session(struct se_session *se_sess) 3506 { 3507 DECLARE_COMPLETION_ONSTACK(release_done); 3508 struct srpt_rdma_ch *ch; 3509 struct srpt_device *sdev; 3510 int res; 3511 3512 ch = se_sess->fabric_sess_ptr; 3513 WARN_ON(ch->sess != se_sess); 3514 3515 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch)); 3516 3517 sdev = ch->sport->sdev; 3518 spin_lock_irq(&sdev->spinlock); 3519 BUG_ON(ch->release_done); 3520 ch->release_done = &release_done; 3521 __srpt_close_ch(ch); 3522 spin_unlock_irq(&sdev->spinlock); 3523 3524 res = wait_for_completion_timeout(&release_done, 60 * HZ); 3525 WARN_ON(res <= 0); 3526 } 3527 3528 /** 3529 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB). 3530 * 3531 * A quote from RFC 4455 (SCSI-MIB) about this MIB object: 3532 * This object represents an arbitrary integer used to uniquely identify a 3533 * particular attached remote initiator port to a particular SCSI target port 3534 * within a particular SCSI target device within a particular SCSI instance. 3535 */ 3536 static u32 srpt_sess_get_index(struct se_session *se_sess) 3537 { 3538 return 0; 3539 } 3540 3541 static void srpt_set_default_node_attrs(struct se_node_acl *nacl) 3542 { 3543 } 3544 3545 static u32 srpt_get_task_tag(struct se_cmd *se_cmd) 3546 { 3547 struct srpt_send_ioctx *ioctx; 3548 3549 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 3550 return ioctx->tag; 3551 } 3552 3553 /* Note: only used from inside debug printk's by the TCM core. */ 3554 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) 3555 { 3556 struct srpt_send_ioctx *ioctx; 3557 3558 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 3559 return srpt_get_cmd_state(ioctx); 3560 } 3561 3562 static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length) 3563 { 3564 return 0; 3565 } 3566 3567 static u16 srpt_get_fabric_sense_len(void) 3568 { 3569 return 0; 3570 } 3571 3572 /** 3573 * srpt_parse_i_port_id() - Parse an initiator port ID. 3574 * @name: ASCII representation of a 128-bit initiator port ID. 3575 * @i_port_id: Binary 128-bit port ID. 
3576 */ 3577 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) 3578 { 3579 const char *p; 3580 unsigned len, count, leading_zero_bytes; 3581 int ret, rc; 3582 3583 p = name; 3584 if (strnicmp(p, "0x", 2) == 0) 3585 p += 2; 3586 ret = -EINVAL; 3587 len = strlen(p); 3588 if (len % 2) 3589 goto out; 3590 count = min(len / 2, 16U); 3591 leading_zero_bytes = 16 - count; 3592 memset(i_port_id, 0, leading_zero_bytes); 3593 rc = hex2bin(i_port_id + leading_zero_bytes, p, count); 3594 if (rc < 0) 3595 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); 3596 ret = 0; 3597 out: 3598 return ret; 3599 } 3600 3601 /* 3602 * configfs callback function invoked for 3603 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3604 */ 3605 static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, 3606 struct config_group *group, 3607 const char *name) 3608 { 3609 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); 3610 struct se_node_acl *se_nacl, *se_nacl_new; 3611 struct srpt_node_acl *nacl; 3612 int ret = 0; 3613 u32 nexus_depth = 1; 3614 u8 i_port_id[16]; 3615 3616 if (srpt_parse_i_port_id(i_port_id, name) < 0) { 3617 printk(KERN_ERR "invalid initiator port ID %s\n", name); 3618 ret = -EINVAL; 3619 goto err; 3620 } 3621 3622 se_nacl_new = srpt_alloc_fabric_acl(tpg); 3623 if (!se_nacl_new) { 3624 ret = -ENOMEM; 3625 goto err; 3626 } 3627 /* 3628 * nacl_new may be released by core_tpg_add_initiator_node_acl() 3629 * when converting a node ACL from demo mode to explict 3630 */ 3631 se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name, 3632 nexus_depth); 3633 if (IS_ERR(se_nacl)) { 3634 ret = PTR_ERR(se_nacl); 3635 goto err; 3636 } 3637 /* Locate our struct srpt_node_acl and set sdev and i_port_id. 
*/ 3638 nacl = container_of(se_nacl, struct srpt_node_acl, nacl); 3639 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); 3640 nacl->sport = sport; 3641 3642 spin_lock_irq(&sport->port_acl_lock); 3643 list_add_tail(&nacl->list, &sport->port_acl_list); 3644 spin_unlock_irq(&sport->port_acl_lock); 3645 3646 return se_nacl; 3647 err: 3648 return ERR_PTR(ret); 3649 } 3650 3651 /* 3652 * configfs callback function invoked for 3653 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3654 */ 3655 static void srpt_drop_nodeacl(struct se_node_acl *se_nacl) 3656 { 3657 struct srpt_node_acl *nacl; 3658 struct srpt_device *sdev; 3659 struct srpt_port *sport; 3660 3661 nacl = container_of(se_nacl, struct srpt_node_acl, nacl); 3662 sport = nacl->sport; 3663 sdev = sport->sdev; 3664 spin_lock_irq(&sport->port_acl_lock); 3665 list_del(&nacl->list); 3666 spin_unlock_irq(&sport->port_acl_lock); 3667 core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1); 3668 srpt_release_fabric_acl(NULL, se_nacl); 3669 } 3670 3671 static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( 3672 struct se_portal_group *se_tpg, 3673 char *page) 3674 { 3675 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3676 3677 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); 3678 } 3679 3680 static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( 3681 struct se_portal_group *se_tpg, 3682 const char *page, 3683 size_t count) 3684 { 3685 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3686 unsigned long val; 3687 int ret; 3688 3689 ret = strict_strtoul(page, 0, &val); 3690 if (ret < 0) { 3691 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3692 return -EINVAL; 3693 } 3694 if (val > MAX_SRPT_RDMA_SIZE) { 3695 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val, 3696 MAX_SRPT_RDMA_SIZE); 3697 return -EINVAL; 3698 } 3699 if (val < DEFAULT_MAX_RDMA_SIZE) { 3700 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n", 3701 val, DEFAULT_MAX_RDMA_SIZE); 3702 return -EINVAL; 3703 } 3704 sport->port_attrib.srp_max_rdma_size = val; 3705 3706 return count; 3707 } 3708 3709 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR); 3710 3711 static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size( 3712 struct se_portal_group *se_tpg, 3713 char *page) 3714 { 3715 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3716 3717 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); 3718 } 3719 3720 static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( 3721 struct se_portal_group *se_tpg, 3722 const char *page, 3723 size_t count) 3724 { 3725 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3726 unsigned long val; 3727 int ret; 3728 3729 ret = strict_strtoul(page, 0, &val); 3730 if (ret < 0) { 3731 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3732 return -EINVAL; 3733 } 3734 if (val > MAX_SRPT_RSP_SIZE) { 3735 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val, 3736 MAX_SRPT_RSP_SIZE); 3737 return -EINVAL; 3738 } 3739 if (val < MIN_MAX_RSP_SIZE) { 3740 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val, 3741 MIN_MAX_RSP_SIZE); 3742 return -EINVAL; 3743 } 3744 sport->port_attrib.srp_max_rsp_size = val; 3745 3746 return count; 3747 } 3748 3749 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR); 3750 3751 static ssize_t srpt_tpg_attrib_show_srp_sq_size( 3752 struct se_portal_group *se_tpg, 3753 char *page) 3754 { 3755 struct srpt_port *sport = 
container_of(se_tpg, struct srpt_port, port_tpg_1); 3756 3757 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); 3758 } 3759 3760 static ssize_t srpt_tpg_attrib_store_srp_sq_size( 3761 struct se_portal_group *se_tpg, 3762 const char *page, 3763 size_t count) 3764 { 3765 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3766 unsigned long val; 3767 int ret; 3768 3769 ret = strict_strtoul(page, 0, &val); 3770 if (ret < 0) { 3771 pr_err("strict_strtoul() failed with ret: %d\n", ret); 3772 return -EINVAL; 3773 } 3774 if (val > MAX_SRPT_SRQ_SIZE) { 3775 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val, 3776 MAX_SRPT_SRQ_SIZE); 3777 return -EINVAL; 3778 } 3779 if (val < MIN_SRPT_SRQ_SIZE) { 3780 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val, 3781 MIN_SRPT_SRQ_SIZE); 3782 return -EINVAL; 3783 } 3784 sport->port_attrib.srp_sq_size = val; 3785 3786 return count; 3787 } 3788 3789 TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR); 3790 3791 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { 3792 &srpt_tpg_attrib_srp_max_rdma_size.attr, 3793 &srpt_tpg_attrib_srp_max_rsp_size.attr, 3794 &srpt_tpg_attrib_srp_sq_size.attr, 3795 NULL, 3796 }; 3797 3798 static ssize_t srpt_tpg_show_enable( 3799 struct se_portal_group *se_tpg, 3800 char *page) 3801 { 3802 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3803 3804 return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0); 3805 } 3806 3807 static ssize_t srpt_tpg_store_enable( 3808 struct se_portal_group *se_tpg, 3809 const char *page, 3810 size_t count) 3811 { 3812 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3813 unsigned long tmp; 3814 int ret; 3815 3816 ret = strict_strtoul(page, 0, &tmp); 3817 if (ret < 0) { 3818 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); 3819 return -EINVAL; 3820 } 3821 3822 if ((tmp != 0) && (tmp != 1)) { 3823 printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp); 3824 return -EINVAL; 3825 } 3826 if (tmp == 1) 3827 sport->enabled = true; 3828 else 3829 sport->enabled = false; 3830 3831 return count; 3832 } 3833 3834 TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR); 3835 3836 static struct configfs_attribute *srpt_tpg_attrs[] = { 3837 &srpt_tpg_enable.attr, 3838 NULL, 3839 }; 3840 3841 /** 3842 * configfs callback invoked for 3843 * mkdir /sys/kernel/config/target/$driver/$port/$tpg 3844 */ 3845 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, 3846 struct config_group *group, 3847 const char *name) 3848 { 3849 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); 3850 int res; 3851 3852 /* Initialize sport->port_wwn and sport->port_tpg_1 */ 3853 res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn, 3854 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); 3855 if (res) 3856 return ERR_PTR(res); 3857 3858 return &sport->port_tpg_1; 3859 } 3860 3861 /** 3862 * configfs callback invoked for 3863 * rmdir /sys/kernel/config/target/$driver/$port/$tpg 3864 */ 3865 static void srpt_drop_tpg(struct se_portal_group *tpg) 3866 { 3867 struct srpt_port *sport = container_of(tpg, 3868 struct srpt_port, port_tpg_1); 3869 3870 sport->enabled = false; 3871 core_tpg_deregister(&sport->port_tpg_1); 3872 } 3873 3874 /** 3875 * configfs callback invoked for 3876 * mkdir /sys/kernel/config/target/$driver/$port 3877 */ 3878 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, 3879 struct config_group *group, 3880 
const char *name) 3881 { 3882 struct srpt_port *sport; 3883 int ret; 3884 3885 sport = srpt_lookup_port(name); 3886 pr_debug("make_tport(%s)\n", name); 3887 ret = -EINVAL; 3888 if (!sport) 3889 goto err; 3890 3891 return &sport->port_wwn; 3892 3893 err: 3894 return ERR_PTR(ret); 3895 } 3896 3897 /** 3898 * configfs callback invoked for 3899 * rmdir /sys/kernel/config/target/$driver/$port 3900 */ 3901 static void srpt_drop_tport(struct se_wwn *wwn) 3902 { 3903 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); 3904 3905 pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item)); 3906 } 3907 3908 static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf, 3909 char *buf) 3910 { 3911 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); 3912 } 3913 3914 TF_WWN_ATTR_RO(srpt, version); 3915 3916 static struct configfs_attribute *srpt_wwn_attrs[] = { 3917 &srpt_wwn_version.attr, 3918 NULL, 3919 }; 3920 3921 static struct target_core_fabric_ops srpt_template = { 3922 .get_fabric_name = srpt_get_fabric_name, 3923 .get_fabric_proto_ident = srpt_get_fabric_proto_ident, 3924 .tpg_get_wwn = srpt_get_fabric_wwn, 3925 .tpg_get_tag = srpt_get_tag, 3926 .tpg_get_default_depth = srpt_get_default_depth, 3927 .tpg_get_pr_transport_id = srpt_get_pr_transport_id, 3928 .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len, 3929 .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id, 3930 .tpg_check_demo_mode = srpt_check_false, 3931 .tpg_check_demo_mode_cache = srpt_check_true, 3932 .tpg_check_demo_mode_write_protect = srpt_check_true, 3933 .tpg_check_prod_mode_write_protect = srpt_check_false, 3934 .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl, 3935 .tpg_release_fabric_acl = srpt_release_fabric_acl, 3936 .tpg_get_inst_index = srpt_tpg_get_inst_index, 3937 .release_cmd = srpt_release_cmd, 3938 .check_stop_free = srpt_check_stop_free, 3939 .shutdown_session = srpt_shutdown_session, 3940 .close_session = srpt_close_session, 3941 .sess_get_index = srpt_sess_get_index, 3942 .sess_get_initiator_sid = NULL, 3943 .write_pending = srpt_write_pending, 3944 .write_pending_status = srpt_write_pending_status, 3945 .set_default_node_attributes = srpt_set_default_node_attrs, 3946 .get_task_tag = srpt_get_task_tag, 3947 .get_cmd_state = srpt_get_tcm_cmd_state, 3948 .queue_data_in = srpt_queue_response, 3949 .queue_status = srpt_queue_status, 3950 .queue_tm_rsp = srpt_queue_response, 3951 .get_fabric_sense_len = srpt_get_fabric_sense_len, 3952 .set_fabric_sense_len = srpt_set_fabric_sense_len, 3953 /* 3954 * Setup function pointers for generic logic in 3955 * target_core_fabric_configfs.c 3956 */ 3957 .fabric_make_wwn = srpt_make_tport, 3958 .fabric_drop_wwn = srpt_drop_tport, 3959 .fabric_make_tpg = srpt_make_tpg, 3960 .fabric_drop_tpg = srpt_drop_tpg, 3961 .fabric_post_link = NULL, 3962 .fabric_pre_unlink = NULL, 3963 .fabric_make_np = NULL, 3964 .fabric_drop_np = NULL, 3965 .fabric_make_nodeacl = srpt_make_nodeacl, 3966 .fabric_drop_nodeacl = srpt_drop_nodeacl, 3967 }; 3968 3969 /** 3970 * srpt_init_module() - Kernel module initialization. 3971 * 3972 * Note: Since ib_register_client() registers callback functions, and since at 3973 * least one of these callback functions (srpt_add_one()) calls target core 3974 * functions, this driver must be registered with the target core before 3975 * ib_register_client() is called. 
3976 */ 3977 static int __init srpt_init_module(void) 3978 { 3979 int ret; 3980 3981 ret = -EINVAL; 3982 if (srp_max_req_size < MIN_MAX_REQ_SIZE) { 3983 printk(KERN_ERR "invalid value %d for kernel module parameter" 3984 " srp_max_req_size -- must be at least %d.\n", 3985 srp_max_req_size, MIN_MAX_REQ_SIZE); 3986 goto out; 3987 } 3988 3989 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE 3990 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { 3991 printk(KERN_ERR "invalid value %d for kernel module parameter" 3992 " srpt_srq_size -- must be in the range [%d..%d].\n", 3993 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); 3994 goto out; 3995 } 3996 3997 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); 3998 if (IS_ERR(srpt_target)) { 3999 printk(KERN_ERR "couldn't register\n"); 4000 ret = PTR_ERR(srpt_target); 4001 goto out; 4002 } 4003 4004 srpt_target->tf_ops = srpt_template; 4005 4006 /* 4007 * Set up default attribute lists. 4008 */ 4009 srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs; 4010 srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs; 4011 srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs; 4012 srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 4013 srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 4014 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 4015 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 4016 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 4017 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 4018 4019 ret = target_fabric_configfs_register(srpt_target); 4020 if (ret < 0) { 4021 printk(KERN_ERR "couldn't register\n"); 4022 goto out_free_target; 4023 } 4024 4025 ret = ib_register_client(&srpt_client); 4026 if (ret) { 4027 printk(KERN_ERR "couldn't register IB client\n"); 4028 goto out_unregister_target; 4029 } 4030 4031 return 0; 4032 4033 out_unregister_target: 4034 target_fabric_configfs_deregister(srpt_target); 4035 srpt_target = NULL; 4036 out_free_target: 4037 if (srpt_target) 4038 target_fabric_configfs_free(srpt_target); 4039 out: 4040 return ret; 4041 } 4042 4043 static void __exit srpt_cleanup_module(void) 4044 { 4045 ib_unregister_client(&srpt_client); 4046 target_fabric_configfs_deregister(srpt_target); 4047 srpt_target = NULL; 4048 } 4049 4050 module_init(srpt_init_module); 4051 module_exit(srpt_cleanup_module); 4052