1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 }; 58 59 60 /* desired maximum for a single sequence - if sg list allows it */ 61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 63 enum nvmet_fcp_datadir { 64 NVMET_FCP_NODATA, 65 NVMET_FCP_WRITE, 66 NVMET_FCP_READ, 67 NVMET_FCP_ABORTED, 68 }; 69 70 struct nvmet_fc_fcp_iod { 71 struct nvmefc_tgt_fcp_req *fcpreq; 72 73 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_ersp_iu rspiubuf; 75 dma_addr_t rspdma; 76 struct scatterlist *next_sg; 77 struct scatterlist *data_sg; 78 int data_sg_cnt; 79 u32 offset; 80 enum nvmet_fcp_datadir io_dir; 81 bool active; 82 bool abort; 83 bool aborted; 84 bool writedataactive; 85 spinlock_t flock; 86 87 struct nvmet_req req; 88 struct work_struct defer_work; 89 90 struct nvmet_fc_tgtport *tgtport; 91 struct nvmet_fc_tgt_queue *queue; 92 93 struct list_head fcp_list; /* tgtport->fcp_list */ 94 }; 95 96 struct nvmet_fc_tgtport { 97 struct nvmet_fc_target_port fc_target_port; 98 99 struct list_head tgt_list; /* nvmet_fc_target_list */ 100 struct device *dev; /* dev for dma mapping */ 101 struct nvmet_fc_target_template *ops; 102 103 struct nvmet_fc_ls_iod *iod; 104 spinlock_t lock; 105 struct list_head ls_rcv_list; 106 struct list_head ls_req_list; 107 struct list_head ls_busylist; 108 struct list_head assoc_list; 109 struct list_head host_list; 110 struct ida assoc_cnt; 111 struct nvmet_fc_port_entry *pe; 112 struct kref ref; 113 u32 max_sg_cnt; 114 }; 115 116 struct nvmet_fc_port_entry { 117 struct nvmet_fc_tgtport *tgtport; 118 struct nvmet_port *port; 119 u64 node_name; 120 u64 port_name; 121 struct list_head pe_list; 122 }; 123 124 struct nvmet_fc_defer_fcp_req { 125 struct list_head req_list; 126 struct nvmefc_tgt_fcp_req *fcp_req; 127 }; 128 129 struct nvmet_fc_tgt_queue { 130 bool ninetypercent; 131 u16 qid; 132 u16 sqsize; 133 u16 ersp_ratio; 134 __le16 sqhd; 135 atomic_t connected; 136 atomic_t sqtail; 137 atomic_t zrspcnt; 138 atomic_t rsn; 139 spinlock_t qlock; 140 struct nvmet_cq nvme_cq; 141 struct nvmet_sq nvme_sq; 142 struct nvmet_fc_tgt_assoc *assoc; 143 struct list_head fod_list; 144 struct list_head 
pending_cmd_list; 145 struct list_head avail_defer_list; 146 struct workqueue_struct *work_q; 147 struct kref ref; 148 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ 149 } __aligned(sizeof(unsigned long long)); 150 151 struct nvmet_fc_hostport { 152 struct nvmet_fc_tgtport *tgtport; 153 void *hosthandle; 154 struct list_head host_list; 155 struct kref ref; 156 u8 invalid; 157 }; 158 159 struct nvmet_fc_tgt_assoc { 160 u64 association_id; 161 u32 a_id; 162 atomic_t terminating; 163 struct nvmet_fc_tgtport *tgtport; 164 struct nvmet_fc_hostport *hostport; 165 struct nvmet_fc_ls_iod *rcv_disconn; 166 struct list_head a_list; 167 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 168 struct kref ref; 169 struct work_struct del_work; 170 }; 171 172 173 static inline int 174 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) 175 { 176 return (iodptr - iodptr->tgtport->iod); 177 } 178 179 static inline int 180 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) 181 { 182 return (fodptr - fodptr->queue->fod); 183 } 184 185 186 /* 187 * Association and Connection IDs: 188 * 189 * Association ID will have random number in upper 6 bytes and zero 190 * in lower 2 bytes 191 * 192 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 193 * 194 * note: Association ID = Connection ID for queue 0 195 */ 196 #define BYTES_FOR_QID sizeof(u16) 197 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 198 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 199 200 static inline u64 201 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 202 { 203 return (assoc->association_id | qid); 204 } 205 206 static inline u64 207 nvmet_fc_getassociationid(u64 connectionid) 208 { 209 return connectionid & ~NVMET_FC_QUEUEID_MASK; 210 } 211 212 static inline u16 213 nvmet_fc_getqueueid(u64 connectionid) 214 { 215 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 216 } 217 218 static inline struct nvmet_fc_tgtport * 219 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 220 { 221 return container_of(targetport, struct nvmet_fc_tgtport, 222 fc_target_port); 223 } 224 225 static inline struct nvmet_fc_fcp_iod * 226 nvmet_req_to_fod(struct nvmet_req *nvme_req) 227 { 228 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 229 } 230 231 232 /* *************************** Globals **************************** */ 233 234 235 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 236 237 static LIST_HEAD(nvmet_fc_target_list); 238 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 239 static LIST_HEAD(nvmet_fc_portentry_list); 240 241 242 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 243 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 244 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 245 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 246 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 247 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 248 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 249 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 250 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 251 struct nvmet_fc_fcp_iod *fod); 252 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); 253 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 254 struct nvmet_fc_ls_iod *iod); 255 256 257 /* *********************** FC-NVME DMA Handling **************************** */ 258 259 /* 260 * 
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 335 } 336 337 static inline void 338 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 339 enum dma_data_direction dir) 340 { 341 if (dev) 342 dma_unmap_sg(dev, sg, nents, dir); 343 } 344 345 346 /* ********************** FC-NVME LS XMT Handling ************************* */ 347 348 349 static void 350 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 351 { 352 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 353 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 354 unsigned long flags; 355 356 spin_lock_irqsave(&tgtport->lock, flags); 357 358 if (!lsop->req_queued) { 359 spin_unlock_irqrestore(&tgtport->lock, flags); 360 return; 361 } 362 363 list_del(&lsop->lsreq_list); 364 365 lsop->req_queued = false; 366 367 spin_unlock_irqrestore(&tgtport->lock, flags); 368 369 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 370 (lsreq->rqstlen + lsreq->rsplen), 371 DMA_BIDIRECTIONAL); 372 373 nvmet_fc_tgtport_put(tgtport); 374 } 375 376 static int 377 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 378 struct nvmet_fc_ls_req_op *lsop, 379 void (*done)(struct nvmefc_ls_req *req, int status)) 380 { 381 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 382 unsigned long flags; 383 int ret = 0; 384 385 if (!tgtport->ops->ls_req) 386 return -EOPNOTSUPP; 387 388 if (!nvmet_fc_tgtport_get(tgtport)) 389 return -ESHUTDOWN; 390 391 lsreq->done = done; 392 lsop->req_queued = false; 393 INIT_LIST_HEAD(&lsop->lsreq_list); 394 395 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 396 lsreq->rqstlen + lsreq->rsplen, 397 DMA_BIDIRECTIONAL); 398 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 399 ret = -EFAULT; 400 goto out_puttgtport; 401 } 402 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 403 404 spin_lock_irqsave(&tgtport->lock, flags); 405 406 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 407 408 lsop->req_queued = true; 409 410 spin_unlock_irqrestore(&tgtport->lock, flags); 411 412 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 413 lsreq); 414 if (ret) 415 goto out_unlink; 416 417 return 0; 418 419 out_unlink: 420 lsop->ls_error = ret; 421 spin_lock_irqsave(&tgtport->lock, flags); 422 lsop->req_queued = false; 423 list_del(&lsop->lsreq_list); 424 spin_unlock_irqrestore(&tgtport->lock, flags); 425 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 426 (lsreq->rqstlen + lsreq->rsplen), 427 DMA_BIDIRECTIONAL); 428 out_puttgtport: 429 nvmet_fc_tgtport_put(tgtport); 430 431 return ret; 432 } 433 434 static int 435 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 436 struct nvmet_fc_ls_req_op *lsop, 437 void (*done)(struct nvmefc_ls_req *req, int status)) 438 { 439 /* don't wait for completion */ 440 441 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 442 } 443 444 static void 445 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 446 { 447 struct nvmet_fc_ls_req_op *lsop = 448 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 449 450 __nvmet_fc_finish_ls_req(lsop); 451 452 /* fc-nvme target doesn't care about success or failure of cmd */ 453 454 kfree(lsop); 455 } 456 457 /* 458 * This routine sends a FC-NVME LS to disconnect (aka terminate) 459 * the FC-NVME Association. Terminating the association also 460 * terminates the FC-NVME connections (per queue, both admin and io 461 * queues) that are part of the association. E.g. 
things are torn 462 * down, and the related FC-NVME Association ID and Connection IDs 463 * become invalid. 464 * 465 * The behavior of the fc-nvme target is such that it's 466 * understanding of the association and connections will implicitly 467 * be torn down. The action is implicit as it may be due to a loss of 468 * connectivity with the fc-nvme host, so the target may never get a 469 * response even if it tried. As such, the action of this routine 470 * is to asynchronously send the LS, ignore any results of the LS, and 471 * continue on with terminating the association. If the fc-nvme host 472 * is present and receives the LS, it too can tear down. 473 */ 474 static void 475 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 476 { 477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 478 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 479 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 480 struct nvmet_fc_ls_req_op *lsop; 481 struct nvmefc_ls_req *lsreq; 482 int ret; 483 484 /* 485 * If ls_req is NULL or no hosthandle, it's an older lldd and no 486 * message is normal. Otherwise, send unless the hostport has 487 * already been invalidated by the lldd. 488 */ 489 if (!tgtport->ops->ls_req || !assoc->hostport || 490 assoc->hostport->invalid) 491 return; 492 493 lsop = kzalloc((sizeof(*lsop) + 494 sizeof(*discon_rqst) + sizeof(*discon_acc) + 495 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 496 if (!lsop) { 497 dev_info(tgtport->dev, 498 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 499 tgtport->fc_target_port.port_num, assoc->a_id); 500 return; 501 } 502 503 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 504 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 505 lsreq = &lsop->ls_req; 506 if (tgtport->ops->lsrqst_priv_sz) 507 lsreq->private = (void *)&discon_acc[1]; 508 else 509 lsreq->private = NULL; 510 511 lsop->tgtport = tgtport; 512 lsop->hosthandle = assoc->hostport->hosthandle; 513 514 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 515 assoc->association_id); 516 517 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 518 nvmet_fc_disconnect_assoc_done); 519 if (ret) { 520 dev_info(tgtport->dev, 521 "{%d:%d} XMT Disconnect Association failed: %d\n", 522 tgtport->fc_target_port.port_num, assoc->a_id, ret); 523 kfree(lsop); 524 } 525 } 526 527 528 /* *********************** FC-NVME Port Management ************************ */ 529 530 531 static int 532 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 533 { 534 struct nvmet_fc_ls_iod *iod; 535 int i; 536 537 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 538 GFP_KERNEL); 539 if (!iod) 540 return -ENOMEM; 541 542 tgtport->iod = iod; 543 544 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 545 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 546 iod->tgtport = tgtport; 547 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 548 549 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 550 sizeof(union nvmefc_ls_responses), 551 GFP_KERNEL); 552 if (!iod->rqstbuf) 553 goto out_fail; 554 555 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 556 557 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 558 sizeof(*iod->rspbuf), 559 DMA_TO_DEVICE); 560 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 561 goto out_fail; 562 } 563 564 return 0; 565 566 out_fail: 567 kfree(iod->rqstbuf); 568 list_del(&iod->ls_rcv_list); 569 for (iod--, i--; i >= 0; iod--, i--) { 570 fc_dma_unmap_single(tgtport->dev, 
iod->rspdma, 571 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 572 kfree(iod->rqstbuf); 573 list_del(&iod->ls_rcv_list); 574 } 575 576 kfree(iod); 577 578 return -EFAULT; 579 } 580 581 static void 582 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 583 { 584 struct nvmet_fc_ls_iod *iod = tgtport->iod; 585 int i; 586 587 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 588 fc_dma_unmap_single(tgtport->dev, 589 iod->rspdma, sizeof(*iod->rspbuf), 590 DMA_TO_DEVICE); 591 kfree(iod->rqstbuf); 592 list_del(&iod->ls_rcv_list); 593 } 594 kfree(tgtport->iod); 595 } 596 597 static struct nvmet_fc_ls_iod * 598 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 599 { 600 struct nvmet_fc_ls_iod *iod; 601 unsigned long flags; 602 603 spin_lock_irqsave(&tgtport->lock, flags); 604 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 605 struct nvmet_fc_ls_iod, ls_rcv_list); 606 if (iod) 607 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 608 spin_unlock_irqrestore(&tgtport->lock, flags); 609 return iod; 610 } 611 612 613 static void 614 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 615 struct nvmet_fc_ls_iod *iod) 616 { 617 unsigned long flags; 618 619 spin_lock_irqsave(&tgtport->lock, flags); 620 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 621 spin_unlock_irqrestore(&tgtport->lock, flags); 622 } 623 624 static void 625 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 626 struct nvmet_fc_tgt_queue *queue) 627 { 628 struct nvmet_fc_fcp_iod *fod = queue->fod; 629 int i; 630 631 for (i = 0; i < queue->sqsize; fod++, i++) { 632 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 633 fod->tgtport = tgtport; 634 fod->queue = queue; 635 fod->active = false; 636 fod->abort = false; 637 fod->aborted = false; 638 fod->fcpreq = NULL; 639 list_add_tail(&fod->fcp_list, &queue->fod_list); 640 spin_lock_init(&fod->flock); 641 642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 643 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 645 list_del(&fod->fcp_list); 646 for (fod--, i--; i >= 0; fod--, i--) { 647 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 648 sizeof(fod->rspiubuf), 649 DMA_TO_DEVICE); 650 fod->rspdma = 0L; 651 list_del(&fod->fcp_list); 652 } 653 654 return; 655 } 656 } 657 } 658 659 static void 660 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 661 struct nvmet_fc_tgt_queue *queue) 662 { 663 struct nvmet_fc_fcp_iod *fod = queue->fod; 664 int i; 665 666 for (i = 0; i < queue->sqsize; fod++, i++) { 667 if (fod->rspdma) 668 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 669 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 670 } 671 } 672 673 static struct nvmet_fc_fcp_iod * 674 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 675 { 676 struct nvmet_fc_fcp_iod *fod; 677 678 lockdep_assert_held(&queue->qlock); 679 680 fod = list_first_entry_or_null(&queue->fod_list, 681 struct nvmet_fc_fcp_iod, fcp_list); 682 if (fod) { 683 list_del(&fod->fcp_list); 684 fod->active = true; 685 /* 686 * no queue reference is taken, as it was taken by the 687 * queue lookup just prior to the allocation. The iod 688 * will "inherit" that reference. 689 */ 690 } 691 return fod; 692 } 693 694 695 static void 696 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 697 struct nvmet_fc_tgt_queue *queue, 698 struct nvmefc_tgt_fcp_req *fcpreq) 699 { 700 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 701 702 /* 703 * put all admin cmds on hw queue id 0. 
All io commands go to 704 * the respective hw queue based on a modulo basis 705 */ 706 fcpreq->hwqid = queue->qid ? 707 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 708 709 nvmet_fc_handle_fcp_rqst(tgtport, fod); 710 } 711 712 static void 713 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 714 { 715 struct nvmet_fc_fcp_iod *fod = 716 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 717 718 /* Submit deferred IO for processing */ 719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 720 721 } 722 723 static void 724 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 725 struct nvmet_fc_fcp_iod *fod) 726 { 727 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 728 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 729 struct nvmet_fc_defer_fcp_req *deferfcp; 730 unsigned long flags; 731 732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 733 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 734 735 fcpreq->nvmet_fc_private = NULL; 736 737 fod->active = false; 738 fod->abort = false; 739 fod->aborted = false; 740 fod->writedataactive = false; 741 fod->fcpreq = NULL; 742 743 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 744 745 /* release the queue lookup reference on the completed IO */ 746 nvmet_fc_tgt_q_put(queue); 747 748 spin_lock_irqsave(&queue->qlock, flags); 749 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 750 struct nvmet_fc_defer_fcp_req, req_list); 751 if (!deferfcp) { 752 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 753 spin_unlock_irqrestore(&queue->qlock, flags); 754 return; 755 } 756 757 /* Re-use the fod for the next pending cmd that was deferred */ 758 list_del(&deferfcp->req_list); 759 760 fcpreq = deferfcp->fcp_req; 761 762 /* deferfcp can be reused for another IO at a later date */ 763 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 764 765 spin_unlock_irqrestore(&queue->qlock, flags); 766 767 /* Save NVME CMD IO in fod */ 768 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 769 770 /* Setup new fcpreq to be processed */ 771 fcpreq->rspaddr = NULL; 772 fcpreq->rsplen = 0; 773 fcpreq->nvmet_fc_private = fod; 774 fod->fcpreq = fcpreq; 775 fod->active = true; 776 777 /* inform LLDD IO is now being processed */ 778 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 779 780 /* 781 * Leave the queue lookup get reference taken when 782 * fod was originally allocated. 
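	 * The deferred command performed its own queue lookup (and get)
	 * when it was received; that reference now travels with this fod
	 * and is released when the deferred I/O itself completes, so no
	 * additional get/put is needed here.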
783 */ 784 785 queue_work(queue->work_q, &fod->defer_work); 786 } 787 788 static struct nvmet_fc_tgt_queue * 789 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 790 u16 qid, u16 sqsize) 791 { 792 struct nvmet_fc_tgt_queue *queue; 793 unsigned long flags; 794 int ret; 795 796 if (qid > NVMET_NR_QUEUES) 797 return NULL; 798 799 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 800 if (!queue) 801 return NULL; 802 803 if (!nvmet_fc_tgt_a_get(assoc)) 804 goto out_free_queue; 805 806 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 807 assoc->tgtport->fc_target_port.port_num, 808 assoc->a_id, qid); 809 if (!queue->work_q) 810 goto out_a_put; 811 812 queue->qid = qid; 813 queue->sqsize = sqsize; 814 queue->assoc = assoc; 815 INIT_LIST_HEAD(&queue->fod_list); 816 INIT_LIST_HEAD(&queue->avail_defer_list); 817 INIT_LIST_HEAD(&queue->pending_cmd_list); 818 atomic_set(&queue->connected, 0); 819 atomic_set(&queue->sqtail, 0); 820 atomic_set(&queue->rsn, 1); 821 atomic_set(&queue->zrspcnt, 0); 822 spin_lock_init(&queue->qlock); 823 kref_init(&queue->ref); 824 825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 826 827 ret = nvmet_sq_init(&queue->nvme_sq); 828 if (ret) 829 goto out_fail_iodlist; 830 831 WARN_ON(assoc->queues[qid]); 832 spin_lock_irqsave(&assoc->tgtport->lock, flags); 833 assoc->queues[qid] = queue; 834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags); 835 836 return queue; 837 838 out_fail_iodlist: 839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 840 destroy_workqueue(queue->work_q); 841 out_a_put: 842 nvmet_fc_tgt_a_put(assoc); 843 out_free_queue: 844 kfree(queue); 845 return NULL; 846 } 847 848 849 static void 850 nvmet_fc_tgt_queue_free(struct kref *ref) 851 { 852 struct nvmet_fc_tgt_queue *queue = 853 container_of(ref, struct nvmet_fc_tgt_queue, ref); 854 unsigned long flags; 855 856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags); 857 queue->assoc->queues[queue->qid] = NULL; 858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags); 859 860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 861 862 nvmet_fc_tgt_a_put(queue->assoc); 863 864 destroy_workqueue(queue->work_q); 865 866 kfree(queue); 867 } 868 869 static void 870 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 871 { 872 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 873 } 874 875 static int 876 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 877 { 878 return kref_get_unless_zero(&queue->ref); 879 } 880 881 882 static void 883 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 884 { 885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 886 struct nvmet_fc_fcp_iod *fod = queue->fod; 887 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 888 unsigned long flags; 889 int i; 890 bool disconnect; 891 892 disconnect = atomic_xchg(&queue->connected, 0); 893 894 /* if not connected, nothing to do */ 895 if (!disconnect) 896 return; 897 898 spin_lock_irqsave(&queue->qlock, flags); 899 /* abort outstanding io's */ 900 for (i = 0; i < queue->sqsize; fod++, i++) { 901 if (fod->active) { 902 spin_lock(&fod->flock); 903 fod->abort = true; 904 /* 905 * only call lldd abort routine if waiting for 906 * writedata. other outstanding ops should finish 907 * on their own. 
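			 * Ops that are not waiting on write data are left
			 * to run; their completion handlers will see
			 * fod->abort set and clean the I/O up at that point.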
908 */ 909 if (fod->writedataactive) { 910 fod->aborted = true; 911 spin_unlock(&fod->flock); 912 tgtport->ops->fcp_abort( 913 &tgtport->fc_target_port, fod->fcpreq); 914 } else 915 spin_unlock(&fod->flock); 916 } 917 } 918 919 /* Cleanup defer'ed IOs in queue */ 920 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 921 req_list) { 922 list_del(&deferfcp->req_list); 923 kfree(deferfcp); 924 } 925 926 for (;;) { 927 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 928 struct nvmet_fc_defer_fcp_req, req_list); 929 if (!deferfcp) 930 break; 931 932 list_del(&deferfcp->req_list); 933 spin_unlock_irqrestore(&queue->qlock, flags); 934 935 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 936 deferfcp->fcp_req); 937 938 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 939 deferfcp->fcp_req); 940 941 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 942 deferfcp->fcp_req); 943 944 /* release the queue lookup reference */ 945 nvmet_fc_tgt_q_put(queue); 946 947 kfree(deferfcp); 948 949 spin_lock_irqsave(&queue->qlock, flags); 950 } 951 spin_unlock_irqrestore(&queue->qlock, flags); 952 953 flush_workqueue(queue->work_q); 954 955 nvmet_sq_destroy(&queue->nvme_sq); 956 957 nvmet_fc_tgt_q_put(queue); 958 } 959 960 static struct nvmet_fc_tgt_queue * 961 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 962 u64 connection_id) 963 { 964 struct nvmet_fc_tgt_assoc *assoc; 965 struct nvmet_fc_tgt_queue *queue; 966 u64 association_id = nvmet_fc_getassociationid(connection_id); 967 u16 qid = nvmet_fc_getqueueid(connection_id); 968 unsigned long flags; 969 970 if (qid > NVMET_NR_QUEUES) 971 return NULL; 972 973 spin_lock_irqsave(&tgtport->lock, flags); 974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 975 if (association_id == assoc->association_id) { 976 queue = assoc->queues[qid]; 977 if (queue && 978 (!atomic_read(&queue->connected) || 979 !nvmet_fc_tgt_q_get(queue))) 980 queue = NULL; 981 spin_unlock_irqrestore(&tgtport->lock, flags); 982 return queue; 983 } 984 } 985 spin_unlock_irqrestore(&tgtport->lock, flags); 986 return NULL; 987 } 988 989 static void 990 nvmet_fc_hostport_free(struct kref *ref) 991 { 992 struct nvmet_fc_hostport *hostport = 993 container_of(ref, struct nvmet_fc_hostport, ref); 994 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 995 unsigned long flags; 996 997 spin_lock_irqsave(&tgtport->lock, flags); 998 list_del(&hostport->host_list); 999 spin_unlock_irqrestore(&tgtport->lock, flags); 1000 if (tgtport->ops->host_release && hostport->invalid) 1001 tgtport->ops->host_release(hostport->hosthandle); 1002 kfree(hostport); 1003 nvmet_fc_tgtport_put(tgtport); 1004 } 1005 1006 static void 1007 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 1008 { 1009 kref_put(&hostport->ref, nvmet_fc_hostport_free); 1010 } 1011 1012 static int 1013 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1014 { 1015 return kref_get_unless_zero(&hostport->ref); 1016 } 1017 1018 static void 1019 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1020 { 1021 /* if LLDD not implemented, leave as NULL */ 1022 if (!hostport || !hostport->hosthandle) 1023 return; 1024 1025 nvmet_fc_hostport_put(hostport); 1026 } 1027 1028 static struct nvmet_fc_hostport * 1029 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1030 { 1031 struct nvmet_fc_hostport *newhost, *host, *match = NULL; 1032 unsigned long flags; 1033 1034 /* if LLDD not implemented, leave as NULL */ 1035 if (!hosthandle) 1036 return NULL; 1037 
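	/*
	 * Note: the allocation is done before scanning tgtport->host_list so
	 * the GFP_KERNEL kzalloc can sleep outside tgtport->lock; the list is
	 * re-checked under the lock below and the fresh allocation is dropped
	 * if another thread created a matching hostport in the meantime.
	 */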
1038 /* take reference for what will be the newly allocated hostport */ 1039 if (!nvmet_fc_tgtport_get(tgtport)) 1040 return ERR_PTR(-EINVAL); 1041 1042 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1043 if (!newhost) { 1044 spin_lock_irqsave(&tgtport->lock, flags); 1045 list_for_each_entry(host, &tgtport->host_list, host_list) { 1046 if (host->hosthandle == hosthandle && !host->invalid) { 1047 if (nvmet_fc_hostport_get(host)) { 1048 match = host; 1049 break; 1050 } 1051 } 1052 } 1053 spin_unlock_irqrestore(&tgtport->lock, flags); 1054 /* no allocation - release reference */ 1055 nvmet_fc_tgtport_put(tgtport); 1056 return (match) ? match : ERR_PTR(-ENOMEM); 1057 } 1058 1059 newhost->tgtport = tgtport; 1060 newhost->hosthandle = hosthandle; 1061 INIT_LIST_HEAD(&newhost->host_list); 1062 kref_init(&newhost->ref); 1063 1064 spin_lock_irqsave(&tgtport->lock, flags); 1065 list_for_each_entry(host, &tgtport->host_list, host_list) { 1066 if (host->hosthandle == hosthandle && !host->invalid) { 1067 if (nvmet_fc_hostport_get(host)) { 1068 match = host; 1069 break; 1070 } 1071 } 1072 } 1073 if (match) { 1074 kfree(newhost); 1075 newhost = NULL; 1076 /* releasing allocation - release reference */ 1077 nvmet_fc_tgtport_put(tgtport); 1078 } else 1079 list_add_tail(&newhost->host_list, &tgtport->host_list); 1080 spin_unlock_irqrestore(&tgtport->lock, flags); 1081 1082 return (match) ? match : newhost; 1083 } 1084 1085 static void 1086 nvmet_fc_delete_assoc(struct work_struct *work) 1087 { 1088 struct nvmet_fc_tgt_assoc *assoc = 1089 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1090 1091 nvmet_fc_delete_target_assoc(assoc); 1092 nvmet_fc_tgt_a_put(assoc); 1093 } 1094 1095 static struct nvmet_fc_tgt_assoc * 1096 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1097 { 1098 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; 1099 unsigned long flags; 1100 u64 ran; 1101 int idx; 1102 bool needrandom = true; 1103 1104 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1105 if (!assoc) 1106 return NULL; 1107 1108 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL); 1109 if (idx < 0) 1110 goto out_free_assoc; 1111 1112 if (!nvmet_fc_tgtport_get(tgtport)) 1113 goto out_ida; 1114 1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1116 if (IS_ERR(assoc->hostport)) 1117 goto out_put; 1118 1119 assoc->tgtport = tgtport; 1120 assoc->a_id = idx; 1121 INIT_LIST_HEAD(&assoc->a_list); 1122 kref_init(&assoc->ref); 1123 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); 1124 atomic_set(&assoc->terminating, 0); 1125 1126 while (needrandom) { 1127 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1128 ran = ran << BYTES_FOR_QID_SHIFT; 1129 1130 spin_lock_irqsave(&tgtport->lock, flags); 1131 needrandom = false; 1132 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { 1133 if (ran == tmpassoc->association_id) { 1134 needrandom = true; 1135 break; 1136 } 1137 } 1138 if (!needrandom) { 1139 assoc->association_id = ran; 1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list); 1141 } 1142 spin_unlock_irqrestore(&tgtport->lock, flags); 1143 } 1144 1145 return assoc; 1146 1147 out_put: 1148 nvmet_fc_tgtport_put(tgtport); 1149 out_ida: 1150 ida_simple_remove(&tgtport->assoc_cnt, idx); 1151 out_free_assoc: 1152 kfree(assoc); 1153 return NULL; 1154 } 1155 1156 static void 1157 nvmet_fc_target_assoc_free(struct kref *ref) 1158 { 1159 struct nvmet_fc_tgt_assoc *assoc = 1160 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1161 struct nvmet_fc_tgtport *tgtport = 
assoc->tgtport; 1162 struct nvmet_fc_ls_iod *oldls; 1163 unsigned long flags; 1164 1165 /* Send Disconnect now that all i/o has completed */ 1166 nvmet_fc_xmt_disconnect_assoc(assoc); 1167 1168 nvmet_fc_free_hostport(assoc->hostport); 1169 spin_lock_irqsave(&tgtport->lock, flags); 1170 list_del(&assoc->a_list); 1171 oldls = assoc->rcv_disconn; 1172 spin_unlock_irqrestore(&tgtport->lock, flags); 1173 /* if pending Rcv Disconnect Association LS, send rsp now */ 1174 if (oldls) 1175 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); 1177 dev_info(tgtport->dev, 1178 "{%d:%d} Association freed\n", 1179 tgtport->fc_target_port.port_num, assoc->a_id); 1180 kfree(assoc); 1181 nvmet_fc_tgtport_put(tgtport); 1182 } 1183 1184 static void 1185 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1186 { 1187 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1188 } 1189 1190 static int 1191 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1192 { 1193 return kref_get_unless_zero(&assoc->ref); 1194 } 1195 1196 static void 1197 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1198 { 1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1200 struct nvmet_fc_tgt_queue *queue; 1201 unsigned long flags; 1202 int i, terminating; 1203 1204 terminating = atomic_xchg(&assoc->terminating, 1); 1205 1206 /* if already terminating, do nothing */ 1207 if (terminating) 1208 return; 1209 1210 spin_lock_irqsave(&tgtport->lock, flags); 1211 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1212 queue = assoc->queues[i]; 1213 if (queue) { 1214 if (!nvmet_fc_tgt_q_get(queue)) 1215 continue; 1216 spin_unlock_irqrestore(&tgtport->lock, flags); 1217 nvmet_fc_delete_target_queue(queue); 1218 nvmet_fc_tgt_q_put(queue); 1219 spin_lock_irqsave(&tgtport->lock, flags); 1220 } 1221 } 1222 spin_unlock_irqrestore(&tgtport->lock, flags); 1223 1224 dev_info(tgtport->dev, 1225 "{%d:%d} Association deleted\n", 1226 tgtport->fc_target_port.port_num, assoc->a_id); 1227 1228 nvmet_fc_tgt_a_put(assoc); 1229 } 1230 1231 static struct nvmet_fc_tgt_assoc * 1232 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1233 u64 association_id) 1234 { 1235 struct nvmet_fc_tgt_assoc *assoc; 1236 struct nvmet_fc_tgt_assoc *ret = NULL; 1237 unsigned long flags; 1238 1239 spin_lock_irqsave(&tgtport->lock, flags); 1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 1241 if (association_id == assoc->association_id) { 1242 ret = assoc; 1243 if (!nvmet_fc_tgt_a_get(assoc)) 1244 ret = NULL; 1245 break; 1246 } 1247 } 1248 spin_unlock_irqrestore(&tgtport->lock, flags); 1249 1250 return ret; 1251 } 1252 1253 static void 1254 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1255 struct nvmet_fc_port_entry *pe, 1256 struct nvmet_port *port) 1257 { 1258 lockdep_assert_held(&nvmet_fc_tgtlock); 1259 1260 pe->tgtport = tgtport; 1261 tgtport->pe = pe; 1262 1263 pe->port = port; 1264 port->priv = pe; 1265 1266 pe->node_name = tgtport->fc_target_port.node_name; 1267 pe->port_name = tgtport->fc_target_port.port_name; 1268 INIT_LIST_HEAD(&pe->pe_list); 1269 1270 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1271 } 1272 1273 static void 1274 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1275 { 1276 unsigned long flags; 1277 1278 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1279 if (pe->tgtport) 1280 pe->tgtport->pe = NULL; 1281 list_del(&pe->pe_list); 1282 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1283 } 1284 1285 /* 1286 * called when a targetport deregisters. 
Breaks the relationship with the nvmet port, but leaves the port_entry
 * in place so that re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's WWNs. (the targetport existed,
 * nvmet was configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                         LLDD to register the existence of a local
 *                         NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node the port corresponds to. Will
 *             be used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
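 *
 * A minimal registration sketch from an LLDD's perspective (illustrative
 * only: "lport", "pdev" and "lldd_fc_tgt_template" are hypothetical LLDD
 * names, and error handling is elided):
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lport->wwnn,
 *		.port_name = lport->wwpn,
 *		.port_id   = lport->port_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &lldd_fc_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (err)
 *		return err;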
1347 */ 1348 int 1349 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1350 struct nvmet_fc_target_template *template, 1351 struct device *dev, 1352 struct nvmet_fc_target_port **portptr) 1353 { 1354 struct nvmet_fc_tgtport *newrec; 1355 unsigned long flags; 1356 int ret, idx; 1357 1358 if (!template->xmt_ls_rsp || !template->fcp_op || 1359 !template->fcp_abort || 1360 !template->fcp_req_release || !template->targetport_delete || 1361 !template->max_hw_queues || !template->max_sgl_segments || 1362 !template->max_dif_sgl_segments || !template->dma_boundary) { 1363 ret = -EINVAL; 1364 goto out_regtgt_failed; 1365 } 1366 1367 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1368 GFP_KERNEL); 1369 if (!newrec) { 1370 ret = -ENOMEM; 1371 goto out_regtgt_failed; 1372 } 1373 1374 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL); 1375 if (idx < 0) { 1376 ret = -ENOSPC; 1377 goto out_fail_kfree; 1378 } 1379 1380 if (!get_device(dev) && dev) { 1381 ret = -ENODEV; 1382 goto out_ida_put; 1383 } 1384 1385 newrec->fc_target_port.node_name = pinfo->node_name; 1386 newrec->fc_target_port.port_name = pinfo->port_name; 1387 if (template->target_priv_sz) 1388 newrec->fc_target_port.private = &newrec[1]; 1389 else 1390 newrec->fc_target_port.private = NULL; 1391 newrec->fc_target_port.port_id = pinfo->port_id; 1392 newrec->fc_target_port.port_num = idx; 1393 INIT_LIST_HEAD(&newrec->tgt_list); 1394 newrec->dev = dev; 1395 newrec->ops = template; 1396 spin_lock_init(&newrec->lock); 1397 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1398 INIT_LIST_HEAD(&newrec->ls_req_list); 1399 INIT_LIST_HEAD(&newrec->ls_busylist); 1400 INIT_LIST_HEAD(&newrec->assoc_list); 1401 INIT_LIST_HEAD(&newrec->host_list); 1402 kref_init(&newrec->ref); 1403 ida_init(&newrec->assoc_cnt); 1404 newrec->max_sg_cnt = template->max_sgl_segments; 1405 1406 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1407 if (ret) { 1408 ret = -ENOMEM; 1409 goto out_free_newrec; 1410 } 1411 1412 nvmet_fc_portentry_rebind_tgt(newrec); 1413 1414 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1415 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1416 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1417 1418 *portptr = &newrec->fc_target_port; 1419 return 0; 1420 1421 out_free_newrec: 1422 put_device(dev); 1423 out_ida_put: 1424 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx); 1425 out_fail_kfree: 1426 kfree(newrec); 1427 out_regtgt_failed: 1428 *portptr = NULL; 1429 return ret; 1430 } 1431 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1432 1433 1434 static void 1435 nvmet_fc_free_tgtport(struct kref *ref) 1436 { 1437 struct nvmet_fc_tgtport *tgtport = 1438 container_of(ref, struct nvmet_fc_tgtport, ref); 1439 struct device *dev = tgtport->dev; 1440 unsigned long flags; 1441 1442 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1443 list_del(&tgtport->tgt_list); 1444 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1445 1446 nvmet_fc_free_ls_iodlist(tgtport); 1447 1448 /* let the LLDD know we've finished tearing it down */ 1449 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1450 1451 ida_simple_remove(&nvmet_fc_tgtport_cnt, 1452 tgtport->fc_target_port.port_num); 1453 1454 ida_destroy(&tgtport->assoc_cnt); 1455 1456 kfree(tgtport); 1457 1458 put_device(dev); 1459 } 1460 1461 static void 1462 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1463 { 1464 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1465 } 1466 1467 static int 1468 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) 1469 { 1470 return 
kref_get_unless_zero(&tgtport->ref); 1471 } 1472 1473 static void 1474 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1475 { 1476 struct nvmet_fc_tgt_assoc *assoc, *next; 1477 unsigned long flags; 1478 1479 spin_lock_irqsave(&tgtport->lock, flags); 1480 list_for_each_entry_safe(assoc, next, 1481 &tgtport->assoc_list, a_list) { 1482 if (!nvmet_fc_tgt_a_get(assoc)) 1483 continue; 1484 if (!schedule_work(&assoc->del_work)) 1485 /* already deleting - release local reference */ 1486 nvmet_fc_tgt_a_put(assoc); 1487 } 1488 spin_unlock_irqrestore(&tgtport->lock, flags); 1489 } 1490 1491 /** 1492 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1493 * to remove references to a hosthandle for LS's. 1494 * 1495 * The nvmet-fc layer ensures that any references to the hosthandle 1496 * on the targetport are forgotten (set to NULL). The LLDD will 1497 * typically call this when a login with a remote host port has been 1498 * lost, thus LS's for the remote host port are no longer possible. 1499 * 1500 * If an LS request is outstanding to the targetport/hosthandle (or 1501 * issued concurrently with the call to invalidate the host), the 1502 * LLDD is responsible for terminating/aborting the LS and completing 1503 * the LS request. It is recommended that these terminations/aborts 1504 * occur after calling to invalidate the host handle to avoid additional 1505 * retries by the nvmet-fc transport. The nvmet-fc transport may 1506 * continue to reference host handle while it cleans up outstanding 1507 * NVME associations. The nvmet-fc transport will call the 1508 * ops->host_release() callback to notify the LLDD that all references 1509 * are complete and the related host handle can be recovered. 1510 * Note: if there are no references, the callback may be called before 1511 * the invalidate host call returns. 1512 * 1513 * @target_port: pointer to the (registered) target port that a prior 1514 * LS was received on and which supplied the transport the 1515 * hosthandle. 1516 * @hosthandle: the handle (pointer) that represents the host port 1517 * that no longer has connectivity and that LS's should 1518 * no longer be directed to. 
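 *
 * A typical LLDD flow is sketched below ("rport" is a hypothetical LLDD
 * structure tracking the remote host port):
 *
 *	nvmet_fc_invalidate_host(targetport, rport->hosthandle);
 *
 * after which the LLDD defers freeing any resources tied to the handle
 * until the transport has called ops->host_release(hosthandle).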
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!assoc->hostport ||
		    assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		assoc->hostport->invalid = 1;
		noassoc = false;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			if (!schedule_work(&assoc->del_work))
				/* already deleting - release local reference */
				nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                         LLDD to deregister/remove a previously
 *                         registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	/*
	 * should terminate LS's as well. However, LS's will be generated
	 * at the tail end of association termination, so they likely don't
	 * exist yet. And even if they did, it's worthwhile to just let
	 * them finish and targetport ref counting will clean things up.
1622 */ 1623 1624 nvmet_fc_tgtport_put(tgtport); 1625 1626 return 0; 1627 } 1628 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1629 1630 1631 /* ********************** FC-NVME LS RCV Handling ************************* */ 1632 1633 1634 static void 1635 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1636 struct nvmet_fc_ls_iod *iod) 1637 { 1638 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1639 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1640 struct nvmet_fc_tgt_queue *queue; 1641 int ret = 0; 1642 1643 memset(acc, 0, sizeof(*acc)); 1644 1645 /* 1646 * FC-NVME spec changes. There are initiators sending different 1647 * lengths as padding sizes for Create Association Cmd descriptor 1648 * was incorrect. 1649 * Accept anything of "minimum" length. Assume format per 1.15 1650 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1651 * trailing pad length is. 1652 */ 1653 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1654 ret = VERR_CR_ASSOC_LEN; 1655 else if (be32_to_cpu(rqst->desc_list_len) < 1656 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1657 ret = VERR_CR_ASSOC_RQST_LEN; 1658 else if (rqst->assoc_cmd.desc_tag != 1659 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1660 ret = VERR_CR_ASSOC_CMD; 1661 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1662 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1663 ret = VERR_CR_ASSOC_CMD_LEN; 1664 else if (!rqst->assoc_cmd.ersp_ratio || 1665 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1666 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1667 ret = VERR_ERSP_RATIO; 1668 1669 else { 1670 /* new association w/ admin queue */ 1671 iod->assoc = nvmet_fc_alloc_target_assoc( 1672 tgtport, iod->hosthandle); 1673 if (!iod->assoc) 1674 ret = VERR_ASSOC_ALLOC_FAIL; 1675 else { 1676 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1677 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1678 if (!queue) 1679 ret = VERR_QUEUE_ALLOC_FAIL; 1680 } 1681 } 1682 1683 if (ret) { 1684 dev_err(tgtport->dev, 1685 "Create Association LS failed: %s\n", 1686 validation_errors[ret]); 1687 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1688 sizeof(*acc), rqst->w0.ls_cmd, 1689 FCNVME_RJT_RC_LOGIC, 1690 FCNVME_RJT_EXP_NONE, 0); 1691 return; 1692 } 1693 1694 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1695 atomic_set(&queue->connected, 1); 1696 queue->sqhd = 0; /* best place to init value */ 1697 1698 dev_info(tgtport->dev, 1699 "{%d:%d} Association created\n", 1700 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1701 1702 /* format a response */ 1703 1704 iod->lsrsp->rsplen = sizeof(*acc); 1705 1706 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1707 fcnvme_lsdesc_len( 1708 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1709 FCNVME_LS_CREATE_ASSOCIATION); 1710 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1711 acc->associd.desc_len = 1712 fcnvme_lsdesc_len( 1713 sizeof(struct fcnvme_lsdesc_assoc_id)); 1714 acc->associd.association_id = 1715 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1716 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1717 acc->connectid.desc_len = 1718 fcnvme_lsdesc_len( 1719 sizeof(struct fcnvme_lsdesc_conn_id)); 1720 acc->connectid.connection_id = acc->associd.association_id; 1721 } 1722 1723 static void 1724 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1725 struct nvmet_fc_ls_iod *iod) 1726 { 1727 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1728 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; 1729 struct 
nvmet_fc_tgt_queue *queue; 1730 int ret = 0; 1731 1732 memset(acc, 0, sizeof(*acc)); 1733 1734 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1735 ret = VERR_CR_CONN_LEN; 1736 else if (rqst->desc_list_len != 1737 fcnvme_lsdesc_len( 1738 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1739 ret = VERR_CR_CONN_RQST_LEN; 1740 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1741 ret = VERR_ASSOC_ID; 1742 else if (rqst->associd.desc_len != 1743 fcnvme_lsdesc_len( 1744 sizeof(struct fcnvme_lsdesc_assoc_id))) 1745 ret = VERR_ASSOC_ID_LEN; 1746 else if (rqst->connect_cmd.desc_tag != 1747 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1748 ret = VERR_CR_CONN_CMD; 1749 else if (rqst->connect_cmd.desc_len != 1750 fcnvme_lsdesc_len( 1751 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1752 ret = VERR_CR_CONN_CMD_LEN; 1753 else if (!rqst->connect_cmd.ersp_ratio || 1754 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1755 be16_to_cpu(rqst->connect_cmd.sqsize))) 1756 ret = VERR_ERSP_RATIO; 1757 1758 else { 1759 /* new io queue */ 1760 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1761 be64_to_cpu(rqst->associd.association_id)); 1762 if (!iod->assoc) 1763 ret = VERR_NO_ASSOC; 1764 else { 1765 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1766 be16_to_cpu(rqst->connect_cmd.qid), 1767 be16_to_cpu(rqst->connect_cmd.sqsize)); 1768 if (!queue) 1769 ret = VERR_QUEUE_ALLOC_FAIL; 1770 1771 /* release get taken in nvmet_fc_find_target_assoc */ 1772 nvmet_fc_tgt_a_put(iod->assoc); 1773 } 1774 } 1775 1776 if (ret) { 1777 dev_err(tgtport->dev, 1778 "Create Connection LS failed: %s\n", 1779 validation_errors[ret]); 1780 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1781 sizeof(*acc), rqst->w0.ls_cmd, 1782 (ret == VERR_NO_ASSOC) ? 1783 FCNVME_RJT_RC_INV_ASSOC : 1784 FCNVME_RJT_RC_LOGIC, 1785 FCNVME_RJT_EXP_NONE, 0); 1786 return; 1787 } 1788 1789 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); 1790 atomic_set(&queue->connected, 1); 1791 queue->sqhd = 0; /* best place to init value */ 1792 1793 /* format a response */ 1794 1795 iod->lsrsp->rsplen = sizeof(*acc); 1796 1797 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1798 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), 1799 FCNVME_LS_CREATE_CONNECTION); 1800 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1801 acc->connectid.desc_len = 1802 fcnvme_lsdesc_len( 1803 sizeof(struct fcnvme_lsdesc_conn_id)); 1804 acc->connectid.connection_id = 1805 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 1806 be16_to_cpu(rqst->connect_cmd.qid))); 1807 } 1808 1809 /* 1810 * Returns true if the LS response is to be transmit 1811 * Returns false if the LS response is to be delayed 1812 */ 1813 static int 1814 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, 1815 struct nvmet_fc_ls_iod *iod) 1816 { 1817 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1818 &iod->rqstbuf->rq_dis_assoc; 1819 struct fcnvme_ls_disconnect_assoc_acc *acc = 1820 &iod->rspbuf->rsp_dis_assoc; 1821 struct nvmet_fc_tgt_assoc *assoc = NULL; 1822 struct nvmet_fc_ls_iod *oldls = NULL; 1823 unsigned long flags; 1824 int ret = 0; 1825 1826 memset(acc, 0, sizeof(*acc)); 1827 1828 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); 1829 if (!ret) { 1830 /* match an active association - takes an assoc ref if !NULL */ 1831 assoc = nvmet_fc_find_target_assoc(tgtport, 1832 be64_to_cpu(rqst->associd.association_id)); 1833 iod->assoc = assoc; 1834 if (!assoc) 1835 ret = VERR_NO_ASSOC; 1836 } 1837 1838 if (ret || !assoc) { 1839 dev_err(tgtport->dev, 1840 
"Disconnect LS failed: %s\n", 1841 validation_errors[ret]); 1842 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1843 sizeof(*acc), rqst->w0.ls_cmd, 1844 (ret == VERR_NO_ASSOC) ? 1845 FCNVME_RJT_RC_INV_ASSOC : 1846 FCNVME_RJT_RC_LOGIC, 1847 FCNVME_RJT_EXP_NONE, 0); 1848 return true; 1849 } 1850 1851 /* format a response */ 1852 1853 iod->lsrsp->rsplen = sizeof(*acc); 1854 1855 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1856 fcnvme_lsdesc_len( 1857 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1858 FCNVME_LS_DISCONNECT_ASSOC); 1859 1860 /* release get taken in nvmet_fc_find_target_assoc */ 1861 nvmet_fc_tgt_a_put(assoc); 1862 1863 /* 1864 * The rules for LS response says the response cannot 1865 * go back until ABTS's have been sent for all outstanding 1866 * I/O and a Disconnect Association LS has been sent. 1867 * So... save off the Disconnect LS to send the response 1868 * later. If there was a prior LS already saved, replace 1869 * it with the newer one and send a can't perform reject 1870 * on the older one. 1871 */ 1872 spin_lock_irqsave(&tgtport->lock, flags); 1873 oldls = assoc->rcv_disconn; 1874 assoc->rcv_disconn = iod; 1875 spin_unlock_irqrestore(&tgtport->lock, flags); 1876 1877 nvmet_fc_delete_target_assoc(assoc); 1878 1879 if (oldls) { 1880 dev_info(tgtport->dev, 1881 "{%d:%d} Multiple Disconnect Association LS's " 1882 "received\n", 1883 tgtport->fc_target_port.port_num, assoc->a_id); 1884 /* overwrite good response with bogus failure */ 1885 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1886 sizeof(*iod->rspbuf), 1887 /* ok to use rqst, LS is same */ 1888 rqst->w0.ls_cmd, 1889 FCNVME_RJT_RC_UNAB, 1890 FCNVME_RJT_EXP_NONE, 0); 1891 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1892 } 1893 1894 return false; 1895 } 1896 1897 1898 /* *********************** NVME Ctrl Routines **************************** */ 1899 1900 1901 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1902 1903 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1904 1905 static void 1906 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1907 { 1908 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1909 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1910 1911 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1912 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1913 nvmet_fc_free_ls_iod(tgtport, iod); 1914 nvmet_fc_tgtport_put(tgtport); 1915 } 1916 1917 static void 1918 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1919 struct nvmet_fc_ls_iod *iod) 1920 { 1921 int ret; 1922 1923 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1924 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1925 1926 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1927 if (ret) 1928 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1929 } 1930 1931 /* 1932 * Actual processing routine for received FC-NVME LS Requests from the LLD 1933 */ 1934 static void 1935 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1936 struct nvmet_fc_ls_iod *iod) 1937 { 1938 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1939 bool sendrsp = true; 1940 1941 iod->lsrsp->nvme_fc_private = iod; 1942 iod->lsrsp->rspbuf = iod->rspbuf; 1943 iod->lsrsp->rspdma = iod->rspdma; 1944 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1945 /* Be preventative. 
handlers will later set to valid length */ 1946 iod->lsrsp->rsplen = 0; 1947 1948 iod->assoc = NULL; 1949 1950 /* 1951 * handlers: 1952 * parse request input, execute the request, and format the 1953 * LS response 1954 */ 1955 switch (w0->ls_cmd) { 1956 case FCNVME_LS_CREATE_ASSOCIATION: 1957 /* Creates Association and initial Admin Queue/Connection */ 1958 nvmet_fc_ls_create_association(tgtport, iod); 1959 break; 1960 case FCNVME_LS_CREATE_CONNECTION: 1961 /* Creates an IO Queue/Connection */ 1962 nvmet_fc_ls_create_connection(tgtport, iod); 1963 break; 1964 case FCNVME_LS_DISCONNECT_ASSOC: 1965 /* Terminate a Queue/Connection or the Association */ 1966 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1967 break; 1968 default: 1969 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1970 sizeof(*iod->rspbuf), w0->ls_cmd, 1971 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1972 } 1973 1974 if (sendrsp) 1975 nvmet_fc_xmt_ls_rsp(tgtport, iod); 1976 } 1977 1978 /* 1979 * Actual processing routine for received FC-NVME LS Requests from the LLD 1980 */ 1981 static void 1982 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 1983 { 1984 struct nvmet_fc_ls_iod *iod = 1985 container_of(work, struct nvmet_fc_ls_iod, work); 1986 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1987 1988 nvmet_fc_handle_ls_rqst(tgtport, iod); 1989 } 1990 1991 1992 /** 1993 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 1994 * upon the reception of a NVME LS request. 1995 * 1996 * The nvmet-fc layer will copy payload to an internal structure for 1997 * processing. As such, upon completion of the routine, the LLDD may 1998 * immediately free/reuse the LS request buffer passed in the call. 1999 * 2000 * If this routine returns error, the LLDD should abort the exchange. 2001 * 2002 * @target_port: pointer to the (registered) target port the LS was 2003 * received on. 2004 * @lsrsp: pointer to a lsrsp structure to be used to reference 2005 * the exchange corresponding to the LS. 2006 * @lsreqbuf: pointer to the buffer containing the LS Request 2007 * @lsreqbuf_len: length, in bytes, of the received LS request 2008 */ 2009 int 2010 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2011 void *hosthandle, 2012 struct nvmefc_ls_rsp *lsrsp, 2013 void *lsreqbuf, u32 lsreqbuf_len) 2014 { 2015 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2016 struct nvmet_fc_ls_iod *iod; 2017 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2018 2019 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2020 dev_info(tgtport->dev, 2021 "RCV %s LS failed: payload too large (%d)\n", 2022 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2023 nvmefc_ls_names[w0->ls_cmd] : "", 2024 lsreqbuf_len); 2025 return -E2BIG; 2026 } 2027 2028 if (!nvmet_fc_tgtport_get(tgtport)) { 2029 dev_info(tgtport->dev, 2030 "RCV %s LS failed: target deleting\n", 2031 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2032 nvmefc_ls_names[w0->ls_cmd] : ""); 2033 return -ESHUTDOWN; 2034 } 2035 2036 iod = nvmet_fc_alloc_ls_iod(tgtport); 2037 if (!iod) { 2038 dev_info(tgtport->dev, 2039 "RCV %s LS failed: context allocation failed\n", 2040 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2041 nvmefc_ls_names[w0->ls_cmd] : ""); 2042 nvmet_fc_tgtport_put(tgtport); 2043 return -ENOENT; 2044 } 2045 2046 iod->lsrsp = lsrsp; 2047 iod->fcpreq = NULL; 2048 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2049 iod->rqstdatalen = lsreqbuf_len; 2050 iod->hosthandle = hosthandle; 2051 2052 schedule_work(&iod->work); 2053 2054 return 0; 2055 } 2056 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2057 2058 2059 /* 2060 * ********************** 2061 * Start of FCP handling 2062 * ********************** 2063 */ 2064 2065 static int 2066 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2067 { 2068 struct scatterlist *sg; 2069 unsigned int nent; 2070 2071 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2072 if (!sg) 2073 goto out; 2074 2075 fod->data_sg = sg; 2076 fod->data_sg_cnt = nent; 2077 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2078 ((fod->io_dir == NVMET_FCP_WRITE) ? 2079 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2080 /* note: write from initiator perspective */ 2081 fod->next_sg = fod->data_sg; 2082 2083 return 0; 2084 2085 out: 2086 return NVME_SC_INTERNAL; 2087 } 2088 2089 static void 2090 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2091 { 2092 if (!fod->data_sg || !fod->data_sg_cnt) 2093 return; 2094 2095 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2096 ((fod->io_dir == NVMET_FCP_WRITE) ? 2097 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2098 sgl_free(fod->data_sg); 2099 fod->data_sg = NULL; 2100 fod->data_sg_cnt = 0; 2101 } 2102 2103 2104 static bool 2105 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2106 { 2107 u32 sqtail, used; 2108 2109 /* egad, this is ugly. And sqtail is just a best guess */ 2110 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2111 2112 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2113 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2114 } 2115 2116 /* 2117 * Prep RSP payload. 2118 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2119 */ 2120 static void 2121 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2122 struct nvmet_fc_fcp_iod *fod) 2123 { 2124 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2125 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2126 struct nvme_completion *cqe = &ersp->cqe; 2127 u32 *cqewd = (u32 *)cqe; 2128 bool send_ersp = false; 2129 u32 rsn, rspcnt, xfr_length; 2130 2131 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2132 xfr_length = fod->req.transfer_len; 2133 else 2134 xfr_length = fod->offset; 2135 2136 /* 2137 * check to see if we can send a 0's rsp. 2138 * Note: to send a 0's response, the NVME-FC host transport will 2139 * recreate the CQE. The host transport knows: sq id, SQHD (last 2140 * seen in an ersp), and command_id. Thus it will create a 2141 * zero-filled CQE with those known fields filled in. Transport 2142 * must send an ersp for any condition where the cqe won't match 2143 * this. 2144 * 2145 * Here are the FC-NVME mandated cases where we must send an ersp: 2146 * every N responses, where N=ersp_ratio 2147 * force fabric commands to send ersp's (not in FC-NVME but good 2148 * practice) 2149 * normal cmds: any time status is non-zero, or status is zero 2150 * but words 0 or 1 are non-zero. 
2151 * the SQ is 90% or more full 2152 * the cmd is a fused command 2153 * transferred data length not equal to cmd iu length 2154 */ 2155 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2156 if (!(rspcnt % fod->queue->ersp_ratio) || 2157 nvme_is_fabrics((struct nvme_command *) sqe) || 2158 xfr_length != fod->req.transfer_len || 2159 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2160 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2161 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2162 send_ersp = true; 2163 2164 /* re-set the fields */ 2165 fod->fcpreq->rspaddr = ersp; 2166 fod->fcpreq->rspdma = fod->rspdma; 2167 2168 if (!send_ersp) { 2169 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2170 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2171 } else { 2172 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2173 rsn = atomic_inc_return(&fod->queue->rsn); 2174 ersp->rsn = cpu_to_be32(rsn); 2175 ersp->xfrd_len = cpu_to_be32(xfr_length); 2176 fod->fcpreq->rsplen = sizeof(*ersp); 2177 } 2178 2179 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2180 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2181 } 2182 2183 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2184 2185 static void 2186 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2187 struct nvmet_fc_fcp_iod *fod) 2188 { 2189 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2190 2191 /* data no longer needed */ 2192 nvmet_fc_free_tgt_pgs(fod); 2193 2194 /* 2195 * if an ABTS was received or we issued the fcp_abort early 2196 * don't call abort routine again. 2197 */ 2198 /* no need to take lock - lock was taken earlier to get here */ 2199 if (!fod->aborted) 2200 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2201 2202 nvmet_fc_free_fcp_iod(fod->queue, fod); 2203 } 2204 2205 static void 2206 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2207 struct nvmet_fc_fcp_iod *fod) 2208 { 2209 int ret; 2210 2211 fod->fcpreq->op = NVMET_FCOP_RSP; 2212 fod->fcpreq->timeout = 0; 2213 2214 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2215 2216 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2217 if (ret) 2218 nvmet_fc_abort_op(tgtport, fod); 2219 } 2220 2221 static void 2222 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2223 struct nvmet_fc_fcp_iod *fod, u8 op) 2224 { 2225 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2226 struct scatterlist *sg = fod->next_sg; 2227 unsigned long flags; 2228 u32 remaininglen = fod->req.transfer_len - fod->offset; 2229 u32 tlen = 0; 2230 int ret; 2231 2232 fcpreq->op = op; 2233 fcpreq->offset = fod->offset; 2234 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2235 2236 /* 2237 * for next sequence: 2238 * break at a sg element boundary 2239 * attempt to keep sequence length capped at 2240 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2241 * be longer if a single sg element is larger 2242 * than that amount. This is done to avoid creating 2243 * a new sg list to use for the tgtport api. 
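	 *
	 * Worked example (illustrative sizes only, assuming max_sg_cnt and
	 * the remaining transfer length are not the limiting factors): with
	 * 64KB DMA segments, three segments (192KB) go out per sequence,
	 * since adding a fourth would reach the 256KB cap and the check
	 * below is strict; a single segment larger than the cap (e.g. 1MB)
	 * is still sent as one longer sequence rather than being split.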
2244 */ 2245 fcpreq->sg = sg; 2246 fcpreq->sg_cnt = 0; 2247 while (tlen < remaininglen && 2248 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2249 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2250 fcpreq->sg_cnt++; 2251 tlen += sg_dma_len(sg); 2252 sg = sg_next(sg); 2253 } 2254 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2255 fcpreq->sg_cnt++; 2256 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2257 sg = sg_next(sg); 2258 } 2259 if (tlen < remaininglen) 2260 fod->next_sg = sg; 2261 else 2262 fod->next_sg = NULL; 2263 2264 fcpreq->transfer_length = tlen; 2265 fcpreq->transferred_length = 0; 2266 fcpreq->fcp_error = 0; 2267 fcpreq->rsplen = 0; 2268 2269 /* 2270 * If the last READDATA request: check if LLDD supports 2271 * combined xfr with response. 2272 */ 2273 if ((op == NVMET_FCOP_READDATA) && 2274 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2275 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2276 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2277 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2278 } 2279 2280 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2281 if (ret) { 2282 /* 2283 * should be ok to set w/o lock as its in the thread of 2284 * execution (not an async timer routine) and doesn't 2285 * contend with any clearing action 2286 */ 2287 fod->abort = true; 2288 2289 if (op == NVMET_FCOP_WRITEDATA) { 2290 spin_lock_irqsave(&fod->flock, flags); 2291 fod->writedataactive = false; 2292 spin_unlock_irqrestore(&fod->flock, flags); 2293 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2294 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2295 fcpreq->fcp_error = ret; 2296 fcpreq->transferred_length = 0; 2297 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2298 } 2299 } 2300 } 2301 2302 static inline bool 2303 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2304 { 2305 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2306 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2307 2308 /* if in the middle of an io and we need to tear down */ 2309 if (abort) { 2310 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2311 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2312 return true; 2313 } 2314 2315 nvmet_fc_abort_op(tgtport, fod); 2316 return true; 2317 } 2318 2319 return false; 2320 } 2321 2322 /* 2323 * actual done handler for FCP operations when completed by the lldd 2324 */ 2325 static void 2326 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2327 { 2328 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2329 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2330 unsigned long flags; 2331 bool abort; 2332 2333 spin_lock_irqsave(&fod->flock, flags); 2334 abort = fod->abort; 2335 fod->writedataactive = false; 2336 spin_unlock_irqrestore(&fod->flock, flags); 2337 2338 switch (fcpreq->op) { 2339 2340 case NVMET_FCOP_WRITEDATA: 2341 if (__nvmet_fc_fod_op_abort(fod, abort)) 2342 return; 2343 if (fcpreq->fcp_error || 2344 fcpreq->transferred_length != fcpreq->transfer_length) { 2345 spin_lock_irqsave(&fod->flock, flags); 2346 fod->abort = true; 2347 spin_unlock_irqrestore(&fod->flock, flags); 2348 2349 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2350 return; 2351 } 2352 2353 fod->offset += fcpreq->transferred_length; 2354 if (fod->offset != fod->req.transfer_len) { 2355 spin_lock_irqsave(&fod->flock, flags); 2356 fod->writedataactive = true; 2357 spin_unlock_irqrestore(&fod->flock, flags); 2358 2359 /* transfer the next chunk */ 2360 nvmet_fc_transfer_fcp_data(tgtport, fod, 2361 NVMET_FCOP_WRITEDATA); 2362 return; 2363 } 
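		/*
		 * Each WRITEDATA completion either queues the next chunk
		 * above or, once fod->offset reaches fod->req.transfer_len,
		 * falls through below to hand the command to the nvmet
		 * layer for execution.
		 */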
2364 2365 /* data transfer complete, resume with nvmet layer */ 2366 fod->req.execute(&fod->req); 2367 break; 2368 2369 case NVMET_FCOP_READDATA: 2370 case NVMET_FCOP_READDATA_RSP: 2371 if (__nvmet_fc_fod_op_abort(fod, abort)) 2372 return; 2373 if (fcpreq->fcp_error || 2374 fcpreq->transferred_length != fcpreq->transfer_length) { 2375 nvmet_fc_abort_op(tgtport, fod); 2376 return; 2377 } 2378 2379 /* success */ 2380 2381 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2382 /* data no longer needed */ 2383 nvmet_fc_free_tgt_pgs(fod); 2384 nvmet_fc_free_fcp_iod(fod->queue, fod); 2385 return; 2386 } 2387 2388 fod->offset += fcpreq->transferred_length; 2389 if (fod->offset != fod->req.transfer_len) { 2390 /* transfer the next chunk */ 2391 nvmet_fc_transfer_fcp_data(tgtport, fod, 2392 NVMET_FCOP_READDATA); 2393 return; 2394 } 2395 2396 /* data transfer complete, send response */ 2397 2398 /* data no longer needed */ 2399 nvmet_fc_free_tgt_pgs(fod); 2400 2401 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2402 2403 break; 2404 2405 case NVMET_FCOP_RSP: 2406 if (__nvmet_fc_fod_op_abort(fod, abort)) 2407 return; 2408 nvmet_fc_free_fcp_iod(fod->queue, fod); 2409 break; 2410 2411 default: 2412 break; 2413 } 2414 } 2415 2416 static void 2417 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2418 { 2419 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2420 2421 nvmet_fc_fod_op_done(fod); 2422 } 2423 2424 /* 2425 * actual completion handler after execution by the nvmet layer 2426 */ 2427 static void 2428 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2429 struct nvmet_fc_fcp_iod *fod, int status) 2430 { 2431 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2432 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2433 unsigned long flags; 2434 bool abort; 2435 2436 spin_lock_irqsave(&fod->flock, flags); 2437 abort = fod->abort; 2438 spin_unlock_irqrestore(&fod->flock, flags); 2439 2440 /* if we have a CQE, snoop the last sq_head value */ 2441 if (!status) 2442 fod->queue->sqhd = cqe->sq_head; 2443 2444 if (abort) { 2445 nvmet_fc_abort_op(tgtport, fod); 2446 return; 2447 } 2448 2449 /* if an error handling the cmd post initial parsing */ 2450 if (status) { 2451 /* fudge up a failed CQE status for our transport error */ 2452 memset(cqe, 0, sizeof(*cqe)); 2453 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2454 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2455 cqe->command_id = sqe->command_id; 2456 cqe->status = cpu_to_le16(status); 2457 } else { 2458 2459 /* 2460 * try to push the data even if the SQE status is non-zero. 
2461 * There may be a status where data still was intended to 2462 * be moved 2463 */ 2464 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2465 /* push the data over before sending rsp */ 2466 nvmet_fc_transfer_fcp_data(tgtport, fod, 2467 NVMET_FCOP_READDATA); 2468 return; 2469 } 2470 2471 /* writes & no data - fall thru */ 2472 } 2473 2474 /* data no longer needed */ 2475 nvmet_fc_free_tgt_pgs(fod); 2476 2477 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2478 } 2479 2480 2481 static void 2482 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2483 { 2484 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2485 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2486 2487 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2488 } 2489 2490 2491 /* 2492 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2493 */ 2494 static void 2495 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2496 struct nvmet_fc_fcp_iod *fod) 2497 { 2498 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2499 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2500 int ret; 2501 2502 /* 2503 * if there is no nvmet mapping to the targetport there 2504 * shouldn't be requests. just terminate them. 2505 */ 2506 if (!tgtport->pe) 2507 goto transport_error; 2508 2509 /* 2510 * Fused commands are currently not supported in the linux 2511 * implementation. 2512 * 2513 * As such, the implementation of the FC transport does not 2514 * look at the fused commands and order delivery to the upper 2515 * layer until we have both based on csn. 2516 */ 2517 2518 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2519 2520 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2521 fod->io_dir = NVMET_FCP_WRITE; 2522 if (!nvme_is_write(&cmdiu->sqe)) 2523 goto transport_error; 2524 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2525 fod->io_dir = NVMET_FCP_READ; 2526 if (nvme_is_write(&cmdiu->sqe)) 2527 goto transport_error; 2528 } else { 2529 fod->io_dir = NVMET_FCP_NODATA; 2530 if (xfrlen) 2531 goto transport_error; 2532 } 2533 2534 fod->req.cmd = &fod->cmdiubuf.sqe; 2535 fod->req.cqe = &fod->rspiubuf.cqe; 2536 fod->req.port = tgtport->pe->port; 2537 2538 /* clear any response payload */ 2539 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2540 2541 fod->data_sg = NULL; 2542 fod->data_sg_cnt = 0; 2543 2544 ret = nvmet_req_init(&fod->req, 2545 &fod->queue->nvme_cq, 2546 &fod->queue->nvme_sq, 2547 &nvmet_fc_tgt_fcp_ops); 2548 if (!ret) { 2549 /* bad SQE content or invalid ctrl state */ 2550 /* nvmet layer has already called op done to send rsp. */ 2551 return; 2552 } 2553 2554 fod->req.transfer_len = xfrlen; 2555 2556 /* keep a running counter of tail position */ 2557 atomic_inc(&fod->queue->sqtail); 2558 2559 if (fod->req.transfer_len) { 2560 ret = nvmet_fc_alloc_tgt_pgs(fod); 2561 if (ret) { 2562 nvmet_req_complete(&fod->req, ret); 2563 return; 2564 } 2565 } 2566 fod->req.sg = fod->data_sg; 2567 fod->req.sg_cnt = fod->data_sg_cnt; 2568 fod->offset = 0; 2569 2570 if (fod->io_dir == NVMET_FCP_WRITE) { 2571 /* pull the data over before invoking nvmet layer */ 2572 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2573 return; 2574 } 2575 2576 /* 2577 * Reads or no data: 2578 * 2579 * can invoke the nvmet_layer now. 
	 * If read data, cmd completion will push the data
	 */
	fod->req.execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD - which may issue a hw command to send the response but not
 * receive the hw completion for it before a new command is
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. This
 * gives the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated, the
 * LLDD request and CMD IU buffer information are remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to
 *               reference the exchange corresponding to the FCP command.
2633 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2634 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2635 */ 2636 int 2637 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2638 struct nvmefc_tgt_fcp_req *fcpreq, 2639 void *cmdiubuf, u32 cmdiubuf_len) 2640 { 2641 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2642 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2643 struct nvmet_fc_tgt_queue *queue; 2644 struct nvmet_fc_fcp_iod *fod; 2645 struct nvmet_fc_defer_fcp_req *deferfcp; 2646 unsigned long flags; 2647 2648 /* validate iu, so the connection id can be used to find the queue */ 2649 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2650 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2651 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2652 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2653 return -EIO; 2654 2655 queue = nvmet_fc_find_target_queue(tgtport, 2656 be64_to_cpu(cmdiu->connection_id)); 2657 if (!queue) 2658 return -ENOTCONN; 2659 2660 /* 2661 * note: reference taken by find_target_queue 2662 * After successful fod allocation, the fod will inherit the 2663 * ownership of that reference and will remove the reference 2664 * when the fod is freed. 2665 */ 2666 2667 spin_lock_irqsave(&queue->qlock, flags); 2668 2669 fod = nvmet_fc_alloc_fcp_iod(queue); 2670 if (fod) { 2671 spin_unlock_irqrestore(&queue->qlock, flags); 2672 2673 fcpreq->nvmet_fc_private = fod; 2674 fod->fcpreq = fcpreq; 2675 2676 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2677 2678 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2679 2680 return 0; 2681 } 2682 2683 if (!tgtport->ops->defer_rcv) { 2684 spin_unlock_irqrestore(&queue->qlock, flags); 2685 /* release the queue lookup reference */ 2686 nvmet_fc_tgt_q_put(queue); 2687 return -ENOENT; 2688 } 2689 2690 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2691 struct nvmet_fc_defer_fcp_req, req_list); 2692 if (deferfcp) { 2693 /* Just re-use one that was previously allocated */ 2694 list_del(&deferfcp->req_list); 2695 } else { 2696 spin_unlock_irqrestore(&queue->qlock, flags); 2697 2698 /* Now we need to dynamically allocate one */ 2699 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2700 if (!deferfcp) { 2701 /* release the queue lookup reference */ 2702 nvmet_fc_tgt_q_put(queue); 2703 return -ENOMEM; 2704 } 2705 spin_lock_irqsave(&queue->qlock, flags); 2706 } 2707 2708 /* For now, use rspaddr / rsplen to save payload information */ 2709 fcpreq->rspaddr = cmdiubuf; 2710 fcpreq->rsplen = cmdiubuf_len; 2711 deferfcp->fcp_req = fcpreq; 2712 2713 /* defer processing till a fod becomes available */ 2714 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2715 2716 /* NOTE: the queue lookup reference is still valid */ 2717 2718 spin_unlock_irqrestore(&queue->qlock, flags); 2719 2720 return -EOVERFLOW; 2721 } 2722 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2723 2724 /** 2725 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2726 * upon the reception of an ABTS for a FCP command 2727 * 2728 * Notify the transport that an ABTS has been received for a FCP command 2729 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2730 * LLDD believes the command is still being worked on 2731 * (template_ops->fcp_req_release() has not been called). 
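 * If the command has in fact already completed (or the fcpreq no longer
 * matches the transport's job structure), the notification is ignored.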
2732 * 2733 * The transport will wait for any outstanding work (an op to the LLDD, 2734 * which the lldd should complete with error due to the ABTS; or the 2735 * completion from the nvmet layer of the nvme command), then will 2736 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to 2737 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2738 * to the ABTS either after return from this function (assuming any 2739 * outstanding op work has been terminated) or upon the callback being 2740 * called. 2741 * 2742 * @target_port: pointer to the (registered) target port the FCP CMD IU 2743 * was received on. 2744 * @fcpreq: pointer to the fcpreq request structure that corresponds 2745 * to the exchange that received the ABTS. 2746 */ 2747 void 2748 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2749 struct nvmefc_tgt_fcp_req *fcpreq) 2750 { 2751 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2752 struct nvmet_fc_tgt_queue *queue; 2753 unsigned long flags; 2754 2755 if (!fod || fod->fcpreq != fcpreq) 2756 /* job appears to have already completed, ignore abort */ 2757 return; 2758 2759 queue = fod->queue; 2760 2761 spin_lock_irqsave(&queue->qlock, flags); 2762 if (fod->active) { 2763 /* 2764 * mark as abort. The abort handler, invoked upon completion 2765 * of any work, will detect the aborted status and do the 2766 * callback. 2767 */ 2768 spin_lock(&fod->flock); 2769 fod->abort = true; 2770 fod->aborted = true; 2771 spin_unlock(&fod->flock); 2772 } 2773 spin_unlock_irqrestore(&queue->qlock, flags); 2774 } 2775 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2776 2777 2778 struct nvmet_fc_traddr { 2779 u64 nn; 2780 u64 pn; 2781 }; 2782 2783 static int 2784 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2785 { 2786 u64 token64; 2787 2788 if (match_u64(sstr, &token64)) 2789 return -EINVAL; 2790 *val = token64; 2791 2792 return 0; 2793 } 2794 2795 /* 2796 * This routine validates and extracts the WWN's from the TRADDR string. 2797 * As kernel parsers need the 0x to determine number base, universally 2798 * build string to parse with 0x prefix before parsing name strings. 
2799 */ 2800 static int 2801 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2802 { 2803 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2804 substring_t wwn = { name, &name[sizeof(name)-1] }; 2805 int nnoffset, pnoffset; 2806 2807 /* validate if string is one of the 2 allowed formats */ 2808 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2809 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2810 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2811 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2812 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2813 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2814 NVME_FC_TRADDR_OXNNLEN; 2815 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2816 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2817 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2818 "pn-", NVME_FC_TRADDR_NNLEN))) { 2819 nnoffset = NVME_FC_TRADDR_NNLEN; 2820 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2821 } else 2822 goto out_einval; 2823 2824 name[0] = '0'; 2825 name[1] = 'x'; 2826 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2827 2828 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2829 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2830 goto out_einval; 2831 2832 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2833 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2834 goto out_einval; 2835 2836 return 0; 2837 2838 out_einval: 2839 pr_warn("%s: bad traddr string\n", __func__); 2840 return -EINVAL; 2841 } 2842 2843 static int 2844 nvmet_fc_add_port(struct nvmet_port *port) 2845 { 2846 struct nvmet_fc_tgtport *tgtport; 2847 struct nvmet_fc_port_entry *pe; 2848 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2849 unsigned long flags; 2850 int ret; 2851 2852 /* validate the address info */ 2853 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2854 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2855 return -EINVAL; 2856 2857 /* map the traddr address info to a target port */ 2858 2859 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2860 sizeof(port->disc_addr.traddr)); 2861 if (ret) 2862 return ret; 2863 2864 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2865 if (!pe) 2866 return -ENOMEM; 2867 2868 ret = -ENXIO; 2869 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2870 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2871 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2872 (tgtport->fc_target_port.port_name == traddr.pn)) { 2873 /* a FC port can only be 1 nvmet port id */ 2874 if (!tgtport->pe) { 2875 nvmet_fc_portentry_bind(tgtport, pe, port); 2876 ret = 0; 2877 } else 2878 ret = -EALREADY; 2879 break; 2880 } 2881 } 2882 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2883 2884 if (ret) 2885 kfree(pe); 2886 2887 return ret; 2888 } 2889 2890 static void 2891 nvmet_fc_remove_port(struct nvmet_port *port) 2892 { 2893 struct nvmet_fc_port_entry *pe = port->priv; 2894 2895 nvmet_fc_portentry_unbind(pe); 2896 2897 kfree(pe); 2898 } 2899 2900 static void 2901 nvmet_fc_discovery_chg(struct nvmet_port *port) 2902 { 2903 struct nvmet_fc_port_entry *pe = port->priv; 2904 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2905 2906 if (tgtport && tgtport->ops->discovery_event) 2907 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2908 } 2909 2910 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2911 .owner = THIS_MODULE, 2912 .type = NVMF_TRTYPE_FC, 2913 .msdbd = 1, 2914 .add_port = nvmet_fc_add_port, 2915 .remove_port = nvmet_fc_remove_port, 2916 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2917 
	.delete_ctrl = nvmet_fc_delete_ctrl,
	.discovery_chg = nvmet_fc_discovery_chg,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all targetports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");
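
/*
 * Illustrative sketch only (not part of the transport; the lldd_* names and
 * fields below are hypothetical): how an LLDD might hand a received FCP CMD
 * IU to nvmet-fc and honor the -EOVERFLOW deferral contract documented at
 * nvmet_fc_rcv_fcp_req().
 *
 *	void lldd_fcp_cmd_received(struct lldd_exchange *exch,
 *				   void *cmdiu, u32 cmdiu_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(exch->lport->targetport,
 *					   &exch->tgt_fcp_req,
 *					   cmdiu, cmdiu_len);
 *		if (ret == 0)
 *			return;		// cmdiu buffer may be reused now
 *		if (ret == -EOVERFLOW) {
 *			// accepted, but the cmdiu buffer must be preserved
 *			// until the defer_rcv() template callback is invoked
 *			return;
 *		}
 *		// any other error: the command was not accepted
 *		lldd_abort_exchange(exch);
 *	}
 */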