1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 }; 58 59 60 /* desired maximum for a single sequence - if sg list allows it */ 61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 62 63 enum nvmet_fcp_datadir { 64 NVMET_FCP_NODATA, 65 NVMET_FCP_WRITE, 66 NVMET_FCP_READ, 67 NVMET_FCP_ABORTED, 68 }; 69 70 struct nvmet_fc_fcp_iod { 71 struct nvmefc_tgt_fcp_req *fcpreq; 72 73 struct nvme_fc_cmd_iu cmdiubuf; 74 struct nvme_fc_ersp_iu rspiubuf; 75 dma_addr_t rspdma; 76 struct scatterlist *next_sg; 77 struct scatterlist *data_sg; 78 int data_sg_cnt; 79 u32 offset; 80 enum nvmet_fcp_datadir io_dir; 81 bool active; 82 bool abort; 83 bool aborted; 84 bool writedataactive; 85 spinlock_t flock; 86 87 struct nvmet_req req; 88 struct work_struct defer_work; 89 90 struct nvmet_fc_tgtport *tgtport; 91 struct nvmet_fc_tgt_queue *queue; 92 93 struct list_head fcp_list; /* tgtport->fcp_list */ 94 }; 95 96 struct nvmet_fc_tgtport { 97 struct nvmet_fc_target_port fc_target_port; 98 99 struct list_head tgt_list; /* nvmet_fc_target_list */ 100 struct device *dev; /* dev for dma mapping */ 101 struct nvmet_fc_target_template *ops; 102 103 struct nvmet_fc_ls_iod *iod; 104 spinlock_t lock; 105 struct list_head ls_rcv_list; 106 struct list_head ls_req_list; 107 struct list_head ls_busylist; 108 struct list_head assoc_list; 109 struct list_head host_list; 110 struct ida assoc_cnt; 111 struct nvmet_fc_port_entry *pe; 112 struct kref ref; 113 u32 max_sg_cnt; 114 115 struct work_struct put_work; 116 }; 117 118 struct nvmet_fc_port_entry { 119 struct nvmet_fc_tgtport *tgtport; 120 struct nvmet_port *port; 121 u64 node_name; 122 u64 port_name; 123 struct list_head pe_list; 124 }; 125 126 struct nvmet_fc_defer_fcp_req { 127 struct list_head req_list; 128 struct nvmefc_tgt_fcp_req *fcp_req; 129 }; 130 131 struct nvmet_fc_tgt_queue { 132 bool ninetypercent; 133 u16 qid; 134 u16 sqsize; 135 u16 ersp_ratio; 136 __le16 sqhd; 137 atomic_t connected; 138 atomic_t sqtail; 139 atomic_t zrspcnt; 140 atomic_t rsn; 141 spinlock_t qlock; 142 struct nvmet_cq nvme_cq; 143 struct nvmet_sq nvme_sq; 144 struct nvmet_fc_tgt_assoc *assoc; 145 struct list_head 
fod_list; 146 struct list_head pending_cmd_list; 147 struct list_head avail_defer_list; 148 struct workqueue_struct *work_q; 149 struct kref ref; 150 /* array of fcp_iods */ 151 struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize); 152 } __aligned(sizeof(unsigned long long)); 153 154 struct nvmet_fc_hostport { 155 struct nvmet_fc_tgtport *tgtport; 156 void *hosthandle; 157 struct list_head host_list; 158 struct kref ref; 159 u8 invalid; 160 }; 161 162 struct nvmet_fc_tgt_assoc { 163 u64 association_id; 164 u32 a_id; 165 atomic_t terminating; 166 struct nvmet_fc_tgtport *tgtport; 167 struct nvmet_fc_hostport *hostport; 168 struct nvmet_fc_ls_iod *rcv_disconn; 169 struct list_head a_list; 170 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 171 struct kref ref; 172 struct work_struct del_work; 173 }; 174 175 176 static inline int 177 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) 178 { 179 return (iodptr - iodptr->tgtport->iod); 180 } 181 182 static inline int 183 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) 184 { 185 return (fodptr - fodptr->queue->fod); 186 } 187 188 189 /* 190 * Association and Connection IDs: 191 * 192 * Association ID will have random number in upper 6 bytes and zero 193 * in lower 2 bytes 194 * 195 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 196 * 197 * note: Association ID = Connection ID for queue 0 198 */ 199 #define BYTES_FOR_QID sizeof(u16) 200 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 201 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 202 203 static inline u64 204 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 205 { 206 return (assoc->association_id | qid); 207 } 208 209 static inline u64 210 nvmet_fc_getassociationid(u64 connectionid) 211 { 212 return connectionid & ~NVMET_FC_QUEUEID_MASK; 213 } 214 215 static inline u16 216 nvmet_fc_getqueueid(u64 connectionid) 217 { 218 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 219 } 220 221 static inline struct nvmet_fc_tgtport * 222 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 223 { 224 return container_of(targetport, struct nvmet_fc_tgtport, 225 fc_target_port); 226 } 227 228 static inline struct nvmet_fc_fcp_iod * 229 nvmet_req_to_fod(struct nvmet_req *nvme_req) 230 { 231 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 232 } 233 234 235 /* *************************** Globals **************************** */ 236 237 238 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 239 240 static LIST_HEAD(nvmet_fc_target_list); 241 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 242 static LIST_HEAD(nvmet_fc_portentry_list); 243 244 245 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 246 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 247 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 248 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 249 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 250 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 251 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 252 static void nvmet_fc_put_tgtport_work(struct work_struct *work) 253 { 254 struct nvmet_fc_tgtport *tgtport = 255 container_of(work, struct nvmet_fc_tgtport, put_work); 256 257 nvmet_fc_tgtport_put(tgtport); 258 } 259 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 260 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 261 struct nvmet_fc_fcp_iod *fod); 262 static void 
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 345 } 346 347 static inline void 348 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 349 enum dma_data_direction dir) 350 { 351 if (dev) 352 dma_unmap_sg(dev, sg, nents, dir); 353 } 354 355 356 /* ********************** FC-NVME LS XMT Handling ************************* */ 357 358 359 static void 360 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 361 { 362 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 363 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 364 unsigned long flags; 365 366 spin_lock_irqsave(&tgtport->lock, flags); 367 368 if (!lsop->req_queued) { 369 spin_unlock_irqrestore(&tgtport->lock, flags); 370 goto out_putwork; 371 } 372 373 list_del(&lsop->lsreq_list); 374 375 lsop->req_queued = false; 376 377 spin_unlock_irqrestore(&tgtport->lock, flags); 378 379 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 380 (lsreq->rqstlen + lsreq->rsplen), 381 DMA_BIDIRECTIONAL); 382 383 out_putwork: 384 queue_work(nvmet_wq, &tgtport->put_work); 385 } 386 387 static int 388 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 389 struct nvmet_fc_ls_req_op *lsop, 390 void (*done)(struct nvmefc_ls_req *req, int status)) 391 { 392 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 393 unsigned long flags; 394 int ret = 0; 395 396 if (!tgtport->ops->ls_req) 397 return -EOPNOTSUPP; 398 399 if (!nvmet_fc_tgtport_get(tgtport)) 400 return -ESHUTDOWN; 401 402 lsreq->done = done; 403 lsop->req_queued = false; 404 INIT_LIST_HEAD(&lsop->lsreq_list); 405 406 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 407 lsreq->rqstlen + lsreq->rsplen, 408 DMA_BIDIRECTIONAL); 409 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 410 ret = -EFAULT; 411 goto out_puttgtport; 412 } 413 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 414 415 spin_lock_irqsave(&tgtport->lock, flags); 416 417 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 418 419 lsop->req_queued = true; 420 421 spin_unlock_irqrestore(&tgtport->lock, flags); 422 423 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 424 lsreq); 425 if (ret) 426 goto out_unlink; 427 428 return 0; 429 430 out_unlink: 431 lsop->ls_error = ret; 432 spin_lock_irqsave(&tgtport->lock, flags); 433 lsop->req_queued = false; 434 list_del(&lsop->lsreq_list); 435 spin_unlock_irqrestore(&tgtport->lock, flags); 436 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 437 (lsreq->rqstlen + lsreq->rsplen), 438 DMA_BIDIRECTIONAL); 439 out_puttgtport: 440 nvmet_fc_tgtport_put(tgtport); 441 442 return ret; 443 } 444 445 static int 446 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 447 struct nvmet_fc_ls_req_op *lsop, 448 void (*done)(struct nvmefc_ls_req *req, int status)) 449 { 450 /* don't wait for completion */ 451 452 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 453 } 454 455 static void 456 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 457 { 458 struct nvmet_fc_ls_req_op *lsop = 459 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 460 461 __nvmet_fc_finish_ls_req(lsop); 462 463 /* fc-nvme target doesn't care about success or failure of cmd */ 464 465 kfree(lsop); 466 } 467 468 /* 469 * This routine sends a FC-NVME LS to disconnect (aka terminate) 470 * the FC-NVME Association. Terminating the association also 471 * terminates the FC-NVME connections (per queue, both admin and io 472 * queues) that are part of the association. E.g. 
things are torn 473 * down, and the related FC-NVME Association ID and Connection IDs 474 * become invalid. 475 * 476 * The behavior of the fc-nvme target is such that it's 477 * understanding of the association and connections will implicitly 478 * be torn down. The action is implicit as it may be due to a loss of 479 * connectivity with the fc-nvme host, so the target may never get a 480 * response even if it tried. As such, the action of this routine 481 * is to asynchronously send the LS, ignore any results of the LS, and 482 * continue on with terminating the association. If the fc-nvme host 483 * is present and receives the LS, it too can tear down. 484 */ 485 static void 486 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 487 { 488 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 489 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 490 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 491 struct nvmet_fc_ls_req_op *lsop; 492 struct nvmefc_ls_req *lsreq; 493 int ret; 494 495 /* 496 * If ls_req is NULL or no hosthandle, it's an older lldd and no 497 * message is normal. Otherwise, send unless the hostport has 498 * already been invalidated by the lldd. 499 */ 500 if (!tgtport->ops->ls_req || assoc->hostport->invalid) 501 return; 502 503 lsop = kzalloc((sizeof(*lsop) + 504 sizeof(*discon_rqst) + sizeof(*discon_acc) + 505 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 506 if (!lsop) { 507 dev_info(tgtport->dev, 508 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 509 tgtport->fc_target_port.port_num, assoc->a_id); 510 return; 511 } 512 513 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 514 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 515 lsreq = &lsop->ls_req; 516 if (tgtport->ops->lsrqst_priv_sz) 517 lsreq->private = (void *)&discon_acc[1]; 518 else 519 lsreq->private = NULL; 520 521 lsop->tgtport = tgtport; 522 lsop->hosthandle = assoc->hostport->hosthandle; 523 524 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 525 assoc->association_id); 526 527 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 528 nvmet_fc_disconnect_assoc_done); 529 if (ret) { 530 dev_info(tgtport->dev, 531 "{%d:%d} XMT Disconnect Association failed: %d\n", 532 tgtport->fc_target_port.port_num, assoc->a_id, ret); 533 kfree(lsop); 534 } 535 } 536 537 538 /* *********************** FC-NVME Port Management ************************ */ 539 540 541 static int 542 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 543 { 544 struct nvmet_fc_ls_iod *iod; 545 int i; 546 547 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 548 GFP_KERNEL); 549 if (!iod) 550 return -ENOMEM; 551 552 tgtport->iod = iod; 553 554 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 555 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 556 iod->tgtport = tgtport; 557 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 558 559 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 560 sizeof(union nvmefc_ls_responses), 561 GFP_KERNEL); 562 if (!iod->rqstbuf) 563 goto out_fail; 564 565 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 566 567 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 568 sizeof(*iod->rspbuf), 569 DMA_TO_DEVICE); 570 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 571 goto out_fail; 572 } 573 574 return 0; 575 576 out_fail: 577 kfree(iod->rqstbuf); 578 list_del(&iod->ls_rcv_list); 579 for (iod--, i--; i >= 0; iod--, i--) { 580 fc_dma_unmap_single(tgtport->dev, iod->rspdma, 581 
sizeof(*iod->rspbuf), DMA_TO_DEVICE); 582 kfree(iod->rqstbuf); 583 list_del(&iod->ls_rcv_list); 584 } 585 586 kfree(iod); 587 588 return -EFAULT; 589 } 590 591 static void 592 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 593 { 594 struct nvmet_fc_ls_iod *iod = tgtport->iod; 595 int i; 596 597 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 598 fc_dma_unmap_single(tgtport->dev, 599 iod->rspdma, sizeof(*iod->rspbuf), 600 DMA_TO_DEVICE); 601 kfree(iod->rqstbuf); 602 list_del(&iod->ls_rcv_list); 603 } 604 kfree(tgtport->iod); 605 } 606 607 static struct nvmet_fc_ls_iod * 608 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 609 { 610 struct nvmet_fc_ls_iod *iod; 611 unsigned long flags; 612 613 spin_lock_irqsave(&tgtport->lock, flags); 614 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 615 struct nvmet_fc_ls_iod, ls_rcv_list); 616 if (iod) 617 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 618 spin_unlock_irqrestore(&tgtport->lock, flags); 619 return iod; 620 } 621 622 623 static void 624 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 625 struct nvmet_fc_ls_iod *iod) 626 { 627 unsigned long flags; 628 629 spin_lock_irqsave(&tgtport->lock, flags); 630 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 631 spin_unlock_irqrestore(&tgtport->lock, flags); 632 } 633 634 static void 635 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 636 struct nvmet_fc_tgt_queue *queue) 637 { 638 struct nvmet_fc_fcp_iod *fod = queue->fod; 639 int i; 640 641 for (i = 0; i < queue->sqsize; fod++, i++) { 642 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 643 fod->tgtport = tgtport; 644 fod->queue = queue; 645 fod->active = false; 646 fod->abort = false; 647 fod->aborted = false; 648 fod->fcpreq = NULL; 649 list_add_tail(&fod->fcp_list, &queue->fod_list); 650 spin_lock_init(&fod->flock); 651 652 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 653 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 654 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 655 list_del(&fod->fcp_list); 656 for (fod--, i--; i >= 0; fod--, i--) { 657 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 658 sizeof(fod->rspiubuf), 659 DMA_TO_DEVICE); 660 fod->rspdma = 0L; 661 list_del(&fod->fcp_list); 662 } 663 664 return; 665 } 666 } 667 } 668 669 static void 670 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 671 struct nvmet_fc_tgt_queue *queue) 672 { 673 struct nvmet_fc_fcp_iod *fod = queue->fod; 674 int i; 675 676 for (i = 0; i < queue->sqsize; fod++, i++) { 677 if (fod->rspdma) 678 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 679 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 680 } 681 } 682 683 static struct nvmet_fc_fcp_iod * 684 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 685 { 686 struct nvmet_fc_fcp_iod *fod; 687 688 lockdep_assert_held(&queue->qlock); 689 690 fod = list_first_entry_or_null(&queue->fod_list, 691 struct nvmet_fc_fcp_iod, fcp_list); 692 if (fod) { 693 list_del(&fod->fcp_list); 694 fod->active = true; 695 /* 696 * no queue reference is taken, as it was taken by the 697 * queue lookup just prior to the allocation. The iod 698 * will "inherit" that reference. 699 */ 700 } 701 return fod; 702 } 703 704 705 static void 706 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 707 struct nvmet_fc_tgt_queue *queue, 708 struct nvmefc_tgt_fcp_req *fcpreq) 709 { 710 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 711 712 /* 713 * put all admin cmds on hw queue id 0. 
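 * (For example, with tgtport->ops->max_hw_queues == 4, I/O qid 5 maps to
 * hwqid (5 - 1) % 4 == 0, while the admin queue always uses hwqid 0.)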
All io commands go to 714 * the respective hw queue based on a modulo basis 715 */ 716 fcpreq->hwqid = queue->qid ? 717 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 718 719 nvmet_fc_handle_fcp_rqst(tgtport, fod); 720 } 721 722 static void 723 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 724 { 725 struct nvmet_fc_fcp_iod *fod = 726 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 727 728 /* Submit deferred IO for processing */ 729 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 730 731 } 732 733 static void 734 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 735 struct nvmet_fc_fcp_iod *fod) 736 { 737 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 738 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 739 struct nvmet_fc_defer_fcp_req *deferfcp; 740 unsigned long flags; 741 742 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 743 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 744 745 fcpreq->nvmet_fc_private = NULL; 746 747 fod->active = false; 748 fod->abort = false; 749 fod->aborted = false; 750 fod->writedataactive = false; 751 fod->fcpreq = NULL; 752 753 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 754 755 /* release the queue lookup reference on the completed IO */ 756 nvmet_fc_tgt_q_put(queue); 757 758 spin_lock_irqsave(&queue->qlock, flags); 759 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 760 struct nvmet_fc_defer_fcp_req, req_list); 761 if (!deferfcp) { 762 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 763 spin_unlock_irqrestore(&queue->qlock, flags); 764 return; 765 } 766 767 /* Re-use the fod for the next pending cmd that was deferred */ 768 list_del(&deferfcp->req_list); 769 770 fcpreq = deferfcp->fcp_req; 771 772 /* deferfcp can be reused for another IO at a later date */ 773 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 774 775 spin_unlock_irqrestore(&queue->qlock, flags); 776 777 /* Save NVME CMD IO in fod */ 778 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 779 780 /* Setup new fcpreq to be processed */ 781 fcpreq->rspaddr = NULL; 782 fcpreq->rsplen = 0; 783 fcpreq->nvmet_fc_private = fod; 784 fod->fcpreq = fcpreq; 785 fod->active = true; 786 787 /* inform LLDD IO is now being processed */ 788 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 789 790 /* 791 * Leave the queue lookup get reference taken when 792 * fod was originally allocated. 
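 * That reference will be dropped when the reused fod itself completes and
 * passes back through nvmet_fc_free_fcp_iod().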
793 */ 794 795 queue_work(queue->work_q, &fod->defer_work); 796 } 797 798 static struct nvmet_fc_tgt_queue * 799 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 800 u16 qid, u16 sqsize) 801 { 802 struct nvmet_fc_tgt_queue *queue; 803 int ret; 804 805 if (qid > NVMET_NR_QUEUES) 806 return NULL; 807 808 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 809 if (!queue) 810 return NULL; 811 812 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 813 assoc->tgtport->fc_target_port.port_num, 814 assoc->a_id, qid); 815 if (!queue->work_q) 816 goto out_free_queue; 817 818 queue->qid = qid; 819 queue->sqsize = sqsize; 820 queue->assoc = assoc; 821 INIT_LIST_HEAD(&queue->fod_list); 822 INIT_LIST_HEAD(&queue->avail_defer_list); 823 INIT_LIST_HEAD(&queue->pending_cmd_list); 824 atomic_set(&queue->connected, 0); 825 atomic_set(&queue->sqtail, 0); 826 atomic_set(&queue->rsn, 1); 827 atomic_set(&queue->zrspcnt, 0); 828 spin_lock_init(&queue->qlock); 829 kref_init(&queue->ref); 830 831 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 832 833 ret = nvmet_sq_init(&queue->nvme_sq); 834 if (ret) 835 goto out_fail_iodlist; 836 837 WARN_ON(assoc->queues[qid]); 838 assoc->queues[qid] = queue; 839 840 return queue; 841 842 out_fail_iodlist: 843 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 844 destroy_workqueue(queue->work_q); 845 out_free_queue: 846 kfree(queue); 847 return NULL; 848 } 849 850 851 static void 852 nvmet_fc_tgt_queue_free(struct kref *ref) 853 { 854 struct nvmet_fc_tgt_queue *queue = 855 container_of(ref, struct nvmet_fc_tgt_queue, ref); 856 857 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 858 859 destroy_workqueue(queue->work_q); 860 861 kfree(queue); 862 } 863 864 static void 865 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 866 { 867 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 868 } 869 870 static int 871 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 872 { 873 return kref_get_unless_zero(&queue->ref); 874 } 875 876 877 static void 878 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 879 { 880 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 881 struct nvmet_fc_fcp_iod *fod = queue->fod; 882 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 883 unsigned long flags; 884 int i; 885 bool disconnect; 886 887 disconnect = atomic_xchg(&queue->connected, 0); 888 889 /* if not connected, nothing to do */ 890 if (!disconnect) 891 return; 892 893 spin_lock_irqsave(&queue->qlock, flags); 894 /* abort outstanding io's */ 895 for (i = 0; i < queue->sqsize; fod++, i++) { 896 if (fod->active) { 897 spin_lock(&fod->flock); 898 fod->abort = true; 899 /* 900 * only call lldd abort routine if waiting for 901 * writedata. other outstanding ops should finish 902 * on their own. 
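 * An op that is still waiting for write data from the (now unreachable)
 * host would otherwise never complete, which is why it gets the explicit
 * abort.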
903 */ 904 if (fod->writedataactive) { 905 fod->aborted = true; 906 spin_unlock(&fod->flock); 907 tgtport->ops->fcp_abort( 908 &tgtport->fc_target_port, fod->fcpreq); 909 } else 910 spin_unlock(&fod->flock); 911 } 912 } 913 914 /* Cleanup defer'ed IOs in queue */ 915 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 916 req_list) { 917 list_del(&deferfcp->req_list); 918 kfree(deferfcp); 919 } 920 921 for (;;) { 922 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 923 struct nvmet_fc_defer_fcp_req, req_list); 924 if (!deferfcp) 925 break; 926 927 list_del(&deferfcp->req_list); 928 spin_unlock_irqrestore(&queue->qlock, flags); 929 930 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 931 deferfcp->fcp_req); 932 933 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 934 deferfcp->fcp_req); 935 936 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 937 deferfcp->fcp_req); 938 939 /* release the queue lookup reference */ 940 nvmet_fc_tgt_q_put(queue); 941 942 kfree(deferfcp); 943 944 spin_lock_irqsave(&queue->qlock, flags); 945 } 946 spin_unlock_irqrestore(&queue->qlock, flags); 947 948 flush_workqueue(queue->work_q); 949 950 nvmet_sq_destroy(&queue->nvme_sq); 951 952 nvmet_fc_tgt_q_put(queue); 953 } 954 955 static struct nvmet_fc_tgt_queue * 956 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 957 u64 connection_id) 958 { 959 struct nvmet_fc_tgt_assoc *assoc; 960 struct nvmet_fc_tgt_queue *queue; 961 u64 association_id = nvmet_fc_getassociationid(connection_id); 962 u16 qid = nvmet_fc_getqueueid(connection_id); 963 964 if (qid > NVMET_NR_QUEUES) 965 return NULL; 966 967 rcu_read_lock(); 968 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 969 if (association_id == assoc->association_id) { 970 queue = assoc->queues[qid]; 971 if (queue && 972 (!atomic_read(&queue->connected) || 973 !nvmet_fc_tgt_q_get(queue))) 974 queue = NULL; 975 rcu_read_unlock(); 976 return queue; 977 } 978 } 979 rcu_read_unlock(); 980 return NULL; 981 } 982 983 static void 984 nvmet_fc_hostport_free(struct kref *ref) 985 { 986 struct nvmet_fc_hostport *hostport = 987 container_of(ref, struct nvmet_fc_hostport, ref); 988 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 989 unsigned long flags; 990 991 spin_lock_irqsave(&tgtport->lock, flags); 992 list_del(&hostport->host_list); 993 spin_unlock_irqrestore(&tgtport->lock, flags); 994 if (tgtport->ops->host_release && hostport->invalid) 995 tgtport->ops->host_release(hostport->hosthandle); 996 kfree(hostport); 997 nvmet_fc_tgtport_put(tgtport); 998 } 999 1000 static void 1001 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 1002 { 1003 kref_put(&hostport->ref, nvmet_fc_hostport_free); 1004 } 1005 1006 static int 1007 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 1008 { 1009 return kref_get_unless_zero(&hostport->ref); 1010 } 1011 1012 static void 1013 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) 1014 { 1015 /* if LLDD not implemented, leave as NULL */ 1016 if (!hostport || !hostport->hosthandle) 1017 return; 1018 1019 nvmet_fc_hostport_put(hostport); 1020 } 1021 1022 static struct nvmet_fc_hostport * 1023 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1024 { 1025 struct nvmet_fc_hostport *host; 1026 1027 lockdep_assert_held(&tgtport->lock); 1028 1029 list_for_each_entry(host, &tgtport->host_list, host_list) { 1030 if (host->hosthandle == hosthandle && !host->invalid) { 1031 if (nvmet_fc_hostport_get(host)) 1032 return host; 1033 } 1034 } 1035 1036 
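        /* no live hostport matched this hosthandle; the caller will allocate one */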
return NULL; 1037 } 1038 1039 static struct nvmet_fc_hostport * 1040 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1041 { 1042 struct nvmet_fc_hostport *newhost, *match = NULL; 1043 unsigned long flags; 1044 1045 /* if LLDD not implemented, leave as NULL */ 1046 if (!hosthandle) 1047 return NULL; 1048 1049 /* 1050 * take reference for what will be the newly allocated hostport if 1051 * we end up using a new allocation 1052 */ 1053 if (!nvmet_fc_tgtport_get(tgtport)) 1054 return ERR_PTR(-EINVAL); 1055 1056 spin_lock_irqsave(&tgtport->lock, flags); 1057 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1058 spin_unlock_irqrestore(&tgtport->lock, flags); 1059 1060 if (match) { 1061 /* no new allocation - release reference */ 1062 nvmet_fc_tgtport_put(tgtport); 1063 return match; 1064 } 1065 1066 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1067 if (!newhost) { 1068 /* no new allocation - release reference */ 1069 nvmet_fc_tgtport_put(tgtport); 1070 return ERR_PTR(-ENOMEM); 1071 } 1072 1073 spin_lock_irqsave(&tgtport->lock, flags); 1074 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1075 if (match) { 1076 /* new allocation not needed */ 1077 kfree(newhost); 1078 newhost = match; 1079 } else { 1080 newhost->tgtport = tgtport; 1081 newhost->hosthandle = hosthandle; 1082 INIT_LIST_HEAD(&newhost->host_list); 1083 kref_init(&newhost->ref); 1084 1085 list_add_tail(&newhost->host_list, &tgtport->host_list); 1086 } 1087 spin_unlock_irqrestore(&tgtport->lock, flags); 1088 1089 return newhost; 1090 } 1091 1092 static void 1093 nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1094 { 1095 nvmet_fc_delete_target_assoc(assoc); 1096 nvmet_fc_tgt_a_put(assoc); 1097 } 1098 1099 static void 1100 nvmet_fc_delete_assoc_work(struct work_struct *work) 1101 { 1102 struct nvmet_fc_tgt_assoc *assoc = 1103 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1104 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1105 1106 nvmet_fc_delete_assoc(assoc); 1107 nvmet_fc_tgtport_put(tgtport); 1108 } 1109 1110 static void 1111 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1112 { 1113 nvmet_fc_tgtport_get(assoc->tgtport); 1114 queue_work(nvmet_wq, &assoc->del_work); 1115 } 1116 1117 static bool 1118 nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id) 1119 { 1120 struct nvmet_fc_tgt_assoc *a; 1121 1122 list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { 1123 if (association_id == a->association_id) 1124 return true; 1125 } 1126 1127 return false; 1128 } 1129 1130 static struct nvmet_fc_tgt_assoc * 1131 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1132 { 1133 struct nvmet_fc_tgt_assoc *assoc; 1134 unsigned long flags; 1135 bool done; 1136 u64 ran; 1137 int idx; 1138 1139 if (!tgtport->pe) 1140 return NULL; 1141 1142 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1143 if (!assoc) 1144 return NULL; 1145 1146 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); 1147 if (idx < 0) 1148 goto out_free_assoc; 1149 1150 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1151 if (IS_ERR(assoc->hostport)) 1152 goto out_ida; 1153 1154 assoc->tgtport = tgtport; 1155 assoc->a_id = idx; 1156 INIT_LIST_HEAD(&assoc->a_list); 1157 kref_init(&assoc->ref); 1158 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); 1159 atomic_set(&assoc->terminating, 0); 1160 1161 done = false; 1162 do { 1163 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1164 ran = ran << BYTES_FOR_QID_SHIFT; 1165 1166 
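                /*
                 * Claim the candidate id under the tgtport lock; if it
                 * collides with an existing association, loop and try a new
                 * random value.  For example, a random value of 0x0123456789ab
                 * becomes association_id 0x0123456789ab0000, and queue 3 of
                 * that association then gets connection id 0x0123456789ab0003.
                 */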
spin_lock_irqsave(&tgtport->lock, flags); 1167 rcu_read_lock(); 1168 if (!nvmet_fc_assoc_exits(tgtport, ran)) { 1169 assoc->association_id = ran; 1170 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1171 done = true; 1172 } 1173 rcu_read_unlock(); 1174 spin_unlock_irqrestore(&tgtport->lock, flags); 1175 } while (!done); 1176 1177 return assoc; 1178 1179 out_ida: 1180 ida_free(&tgtport->assoc_cnt, idx); 1181 out_free_assoc: 1182 kfree(assoc); 1183 return NULL; 1184 } 1185 1186 static void 1187 nvmet_fc_target_assoc_free(struct kref *ref) 1188 { 1189 struct nvmet_fc_tgt_assoc *assoc = 1190 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1191 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1192 struct nvmet_fc_ls_iod *oldls; 1193 unsigned long flags; 1194 int i; 1195 1196 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1197 if (assoc->queues[i]) 1198 nvmet_fc_delete_target_queue(assoc->queues[i]); 1199 } 1200 1201 /* Send Disconnect now that all i/o has completed */ 1202 nvmet_fc_xmt_disconnect_assoc(assoc); 1203 1204 nvmet_fc_free_hostport(assoc->hostport); 1205 spin_lock_irqsave(&tgtport->lock, flags); 1206 oldls = assoc->rcv_disconn; 1207 spin_unlock_irqrestore(&tgtport->lock, flags); 1208 /* if pending Rcv Disconnect Association LS, send rsp now */ 1209 if (oldls) 1210 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1211 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1212 dev_info(tgtport->dev, 1213 "{%d:%d} Association freed\n", 1214 tgtport->fc_target_port.port_num, assoc->a_id); 1215 kfree(assoc); 1216 } 1217 1218 static void 1219 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1220 { 1221 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1222 } 1223 1224 static int 1225 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1226 { 1227 return kref_get_unless_zero(&assoc->ref); 1228 } 1229 1230 static void 1231 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1232 { 1233 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1234 unsigned long flags; 1235 int i, terminating; 1236 1237 terminating = atomic_xchg(&assoc->terminating, 1); 1238 1239 /* if already terminating, do nothing */ 1240 if (terminating) 1241 return; 1242 1243 spin_lock_irqsave(&tgtport->lock, flags); 1244 list_del_rcu(&assoc->a_list); 1245 spin_unlock_irqrestore(&tgtport->lock, flags); 1246 1247 synchronize_rcu(); 1248 1249 /* ensure all in-flight I/Os have been processed */ 1250 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1251 if (assoc->queues[i]) 1252 flush_workqueue(assoc->queues[i]->work_q); 1253 } 1254 1255 dev_info(tgtport->dev, 1256 "{%d:%d} Association deleted\n", 1257 tgtport->fc_target_port.port_num, assoc->a_id); 1258 } 1259 1260 static struct nvmet_fc_tgt_assoc * 1261 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1262 u64 association_id) 1263 { 1264 struct nvmet_fc_tgt_assoc *assoc; 1265 struct nvmet_fc_tgt_assoc *ret = NULL; 1266 1267 rcu_read_lock(); 1268 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1269 if (association_id == assoc->association_id) { 1270 ret = assoc; 1271 if (!nvmet_fc_tgt_a_get(assoc)) 1272 ret = NULL; 1273 break; 1274 } 1275 } 1276 rcu_read_unlock(); 1277 1278 return ret; 1279 } 1280 1281 static void 1282 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1283 struct nvmet_fc_port_entry *pe, 1284 struct nvmet_port *port) 1285 { 1286 lockdep_assert_held(&nvmet_fc_tgtlock); 1287 1288 pe->tgtport = tgtport; 1289 tgtport->pe = pe; 1290 1291 pe->port = port; 1292 port->priv = pe; 1293 1294 pe->node_name = tgtport->fc_target_port.node_name; 
        pe->port_name = tgtport->fc_target_port.port_name;
        INIT_LIST_HEAD(&pe->pe_list);

        list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
        unsigned long flags;

        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        if (pe->tgtport)
                pe->tgtport->pe = NULL;
        list_del(&pe->pe_list);
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
        struct nvmet_fc_port_entry *pe;
        unsigned long flags;

        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        pe = tgtport->pe;
        if (pe)
                pe->tgtport = NULL;
        tgtport->pe = NULL;
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's WWNs. (the targetport existed,
 * nvmet was configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
        struct nvmet_fc_port_entry *pe;
        unsigned long flags;

        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
                if (tgtport->fc_target_port.node_name == pe->node_name &&
                    tgtport->fc_target_port.port_name == pe->port_name) {
                        WARN_ON(pe->tgtport);
                        tgtport->pe = pe;
                        pe->tgtport = tgtport;
                        break;
                }
        }
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                        LLDD to register the existence of a local
 *                        NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate an nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
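 *
 * A minimal, illustrative registration sketch from an LLDD's perspective
 * (the lldd_* names and the numeric limits below are hypothetical; only the
 * nvmet_fc_* symbols are defined by this transport):
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= lldd_wwnn,
 *		.port_name	= lldd_wwpn,
 *		.port_id	= lldd_d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   lldd_dev, &targetport);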
1375 */ 1376 int 1377 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1378 struct nvmet_fc_target_template *template, 1379 struct device *dev, 1380 struct nvmet_fc_target_port **portptr) 1381 { 1382 struct nvmet_fc_tgtport *newrec; 1383 unsigned long flags; 1384 int ret, idx; 1385 1386 if (!template->xmt_ls_rsp || !template->fcp_op || 1387 !template->fcp_abort || 1388 !template->fcp_req_release || !template->targetport_delete || 1389 !template->max_hw_queues || !template->max_sgl_segments || 1390 !template->max_dif_sgl_segments || !template->dma_boundary) { 1391 ret = -EINVAL; 1392 goto out_regtgt_failed; 1393 } 1394 1395 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1396 GFP_KERNEL); 1397 if (!newrec) { 1398 ret = -ENOMEM; 1399 goto out_regtgt_failed; 1400 } 1401 1402 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); 1403 if (idx < 0) { 1404 ret = -ENOSPC; 1405 goto out_fail_kfree; 1406 } 1407 1408 if (!get_device(dev) && dev) { 1409 ret = -ENODEV; 1410 goto out_ida_put; 1411 } 1412 1413 newrec->fc_target_port.node_name = pinfo->node_name; 1414 newrec->fc_target_port.port_name = pinfo->port_name; 1415 if (template->target_priv_sz) 1416 newrec->fc_target_port.private = &newrec[1]; 1417 else 1418 newrec->fc_target_port.private = NULL; 1419 newrec->fc_target_port.port_id = pinfo->port_id; 1420 newrec->fc_target_port.port_num = idx; 1421 INIT_LIST_HEAD(&newrec->tgt_list); 1422 newrec->dev = dev; 1423 newrec->ops = template; 1424 spin_lock_init(&newrec->lock); 1425 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1426 INIT_LIST_HEAD(&newrec->ls_req_list); 1427 INIT_LIST_HEAD(&newrec->ls_busylist); 1428 INIT_LIST_HEAD(&newrec->assoc_list); 1429 INIT_LIST_HEAD(&newrec->host_list); 1430 kref_init(&newrec->ref); 1431 ida_init(&newrec->assoc_cnt); 1432 newrec->max_sg_cnt = template->max_sgl_segments; 1433 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); 1434 1435 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1436 if (ret) { 1437 ret = -ENOMEM; 1438 goto out_free_newrec; 1439 } 1440 1441 nvmet_fc_portentry_rebind_tgt(newrec); 1442 1443 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1444 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1445 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1446 1447 *portptr = &newrec->fc_target_port; 1448 return 0; 1449 1450 out_free_newrec: 1451 put_device(dev); 1452 out_ida_put: 1453 ida_free(&nvmet_fc_tgtport_cnt, idx); 1454 out_fail_kfree: 1455 kfree(newrec); 1456 out_regtgt_failed: 1457 *portptr = NULL; 1458 return ret; 1459 } 1460 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1461 1462 1463 static void 1464 nvmet_fc_free_tgtport(struct kref *ref) 1465 { 1466 struct nvmet_fc_tgtport *tgtport = 1467 container_of(ref, struct nvmet_fc_tgtport, ref); 1468 struct device *dev = tgtport->dev; 1469 unsigned long flags; 1470 1471 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1472 list_del(&tgtport->tgt_list); 1473 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1474 1475 nvmet_fc_free_ls_iodlist(tgtport); 1476 1477 /* let the LLDD know we've finished tearing it down */ 1478 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1479 1480 ida_free(&nvmet_fc_tgtport_cnt, 1481 tgtport->fc_target_port.port_num); 1482 1483 ida_destroy(&tgtport->assoc_cnt); 1484 1485 kfree(tgtport); 1486 1487 put_device(dev); 1488 } 1489 1490 static void 1491 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1492 { 1493 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1494 } 1495 1496 static int 1497 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport 
*tgtport) 1498 { 1499 return kref_get_unless_zero(&tgtport->ref); 1500 } 1501 1502 static void 1503 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1504 { 1505 struct nvmet_fc_tgt_assoc *assoc; 1506 1507 rcu_read_lock(); 1508 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1509 if (!nvmet_fc_tgt_a_get(assoc)) 1510 continue; 1511 nvmet_fc_schedule_delete_assoc(assoc); 1512 nvmet_fc_tgt_a_put(assoc); 1513 } 1514 rcu_read_unlock(); 1515 } 1516 1517 /** 1518 * nvmet_fc_invalidate_host - transport entry point called by an LLDD 1519 * to remove references to a hosthandle for LS's. 1520 * 1521 * The nvmet-fc layer ensures that any references to the hosthandle 1522 * on the targetport are forgotten (set to NULL). The LLDD will 1523 * typically call this when a login with a remote host port has been 1524 * lost, thus LS's for the remote host port are no longer possible. 1525 * 1526 * If an LS request is outstanding to the targetport/hosthandle (or 1527 * issued concurrently with the call to invalidate the host), the 1528 * LLDD is responsible for terminating/aborting the LS and completing 1529 * the LS request. It is recommended that these terminations/aborts 1530 * occur after calling to invalidate the host handle to avoid additional 1531 * retries by the nvmet-fc transport. The nvmet-fc transport may 1532 * continue to reference host handle while it cleans up outstanding 1533 * NVME associations. The nvmet-fc transport will call the 1534 * ops->host_release() callback to notify the LLDD that all references 1535 * are complete and the related host handle can be recovered. 1536 * Note: if there are no references, the callback may be called before 1537 * the invalidate host call returns. 1538 * 1539 * @target_port: pointer to the (registered) target port that a prior 1540 * LS was received on and which supplied the transport the 1541 * hosthandle. 1542 * @hosthandle: the handle (pointer) that represents the host port 1543 * that no longer has connectivity and that LS's should 1544 * no longer be directed to. 
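 *
 * Illustrative call site (lldd_rport and its members are hypothetical LLDD
 * state for the remote port whose login was lost):
 *
 *	nvmet_fc_invalidate_host(lldd_rport->targetport,
 *				 lldd_rport->nvmet_hosthandle);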
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
                        void *hosthandle)
{
        struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
        struct nvmet_fc_tgt_assoc *assoc, *next;
        unsigned long flags;
        bool noassoc = true;

        spin_lock_irqsave(&tgtport->lock, flags);
        list_for_each_entry_safe(assoc, next,
                                &tgtport->assoc_list, a_list) {
                if (assoc->hostport->hosthandle != hosthandle)
                        continue;
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
                assoc->hostport->invalid = 1;
                noassoc = false;
                nvmet_fc_schedule_delete_assoc(assoc);
                nvmet_fc_tgt_a_put(assoc);
        }
        spin_unlock_irqrestore(&tgtport->lock, flags);

        /* if there's nothing to wait for - call the callback */
        if (noassoc && tgtport->ops->host_release)
                tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
        struct nvmet_fc_tgtport *tgtport, *next;
        struct nvmet_fc_tgt_assoc *assoc;
        struct nvmet_fc_tgt_queue *queue;
        unsigned long flags;
        bool found_ctrl = false;

        /* this is a bit ugly, but don't want to make locks layered */
        spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
                        tgt_list) {
                if (!nvmet_fc_tgtport_get(tgtport))
                        continue;
                spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

                rcu_read_lock();
                list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                        queue = assoc->queues[0];
                        if (queue && queue->nvme_sq.ctrl == ctrl) {
                                if (nvmet_fc_tgt_a_get(assoc))
                                        found_ctrl = true;
                                break;
                        }
                }
                rcu_read_unlock();

                nvmet_fc_tgtport_put(tgtport);

                if (found_ctrl) {
                        nvmet_fc_schedule_delete_assoc(assoc);
                        nvmet_fc_tgt_a_put(assoc);
                        return;
                }

                spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
        }
        spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                      LLDD to deregister/remove a previously
 *                      registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
        struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

        nvmet_fc_portentry_unbind_tgt(tgtport);

        /* terminate any outstanding associations */
        __nvmet_fc_free_assocs(tgtport);

        flush_workqueue(nvmet_wq);

        /*
         * should terminate LS's as well. However, LS's will be generated
         * at the tail end of association termination, so they likely don't
         * exist yet. And even if they did, it's worthwhile to just let
         * them finish and targetport ref counting will clean things up.
1647 */ 1648 1649 nvmet_fc_tgtport_put(tgtport); 1650 1651 return 0; 1652 } 1653 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1654 1655 1656 /* ********************** FC-NVME LS RCV Handling ************************* */ 1657 1658 1659 static void 1660 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1661 struct nvmet_fc_ls_iod *iod) 1662 { 1663 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1664 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1665 struct nvmet_fc_tgt_queue *queue; 1666 int ret = 0; 1667 1668 memset(acc, 0, sizeof(*acc)); 1669 1670 /* 1671 * FC-NVME spec changes. There are initiators sending different 1672 * lengths as padding sizes for Create Association Cmd descriptor 1673 * was incorrect. 1674 * Accept anything of "minimum" length. Assume format per 1.15 1675 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1676 * trailing pad length is. 1677 */ 1678 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1679 ret = VERR_CR_ASSOC_LEN; 1680 else if (be32_to_cpu(rqst->desc_list_len) < 1681 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1682 ret = VERR_CR_ASSOC_RQST_LEN; 1683 else if (rqst->assoc_cmd.desc_tag != 1684 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1685 ret = VERR_CR_ASSOC_CMD; 1686 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1687 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1688 ret = VERR_CR_ASSOC_CMD_LEN; 1689 else if (!rqst->assoc_cmd.ersp_ratio || 1690 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1691 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1692 ret = VERR_ERSP_RATIO; 1693 1694 else { 1695 /* new association w/ admin queue */ 1696 iod->assoc = nvmet_fc_alloc_target_assoc( 1697 tgtport, iod->hosthandle); 1698 if (!iod->assoc) 1699 ret = VERR_ASSOC_ALLOC_FAIL; 1700 else { 1701 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1702 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1703 if (!queue) { 1704 ret = VERR_QUEUE_ALLOC_FAIL; 1705 nvmet_fc_tgt_a_put(iod->assoc); 1706 } 1707 } 1708 } 1709 1710 if (ret) { 1711 dev_err(tgtport->dev, 1712 "Create Association LS failed: %s\n", 1713 validation_errors[ret]); 1714 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1715 sizeof(*acc), rqst->w0.ls_cmd, 1716 FCNVME_RJT_RC_LOGIC, 1717 FCNVME_RJT_EXP_NONE, 0); 1718 return; 1719 } 1720 1721 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1722 atomic_set(&queue->connected, 1); 1723 queue->sqhd = 0; /* best place to init value */ 1724 1725 dev_info(tgtport->dev, 1726 "{%d:%d} Association created\n", 1727 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1728 1729 /* format a response */ 1730 1731 iod->lsrsp->rsplen = sizeof(*acc); 1732 1733 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1734 fcnvme_lsdesc_len( 1735 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1736 FCNVME_LS_CREATE_ASSOCIATION); 1737 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1738 acc->associd.desc_len = 1739 fcnvme_lsdesc_len( 1740 sizeof(struct fcnvme_lsdesc_assoc_id)); 1741 acc->associd.association_id = 1742 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1743 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1744 acc->connectid.desc_len = 1745 fcnvme_lsdesc_len( 1746 sizeof(struct fcnvme_lsdesc_conn_id)); 1747 acc->connectid.connection_id = acc->associd.association_id; 1748 } 1749 1750 static void 1751 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1752 struct nvmet_fc_ls_iod *iod) 1753 { 1754 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1755 struct fcnvme_ls_cr_conn_acc *acc = 
&iod->rspbuf->rsp_cr_conn; 1756 struct nvmet_fc_tgt_queue *queue; 1757 int ret = 0; 1758 1759 memset(acc, 0, sizeof(*acc)); 1760 1761 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1762 ret = VERR_CR_CONN_LEN; 1763 else if (rqst->desc_list_len != 1764 fcnvme_lsdesc_len( 1765 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1766 ret = VERR_CR_CONN_RQST_LEN; 1767 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1768 ret = VERR_ASSOC_ID; 1769 else if (rqst->associd.desc_len != 1770 fcnvme_lsdesc_len( 1771 sizeof(struct fcnvme_lsdesc_assoc_id))) 1772 ret = VERR_ASSOC_ID_LEN; 1773 else if (rqst->connect_cmd.desc_tag != 1774 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1775 ret = VERR_CR_CONN_CMD; 1776 else if (rqst->connect_cmd.desc_len != 1777 fcnvme_lsdesc_len( 1778 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1779 ret = VERR_CR_CONN_CMD_LEN; 1780 else if (!rqst->connect_cmd.ersp_ratio || 1781 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1782 be16_to_cpu(rqst->connect_cmd.sqsize))) 1783 ret = VERR_ERSP_RATIO; 1784 1785 else { 1786 /* new io queue */ 1787 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1788 be64_to_cpu(rqst->associd.association_id)); 1789 if (!iod->assoc) 1790 ret = VERR_NO_ASSOC; 1791 else { 1792 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1793 be16_to_cpu(rqst->connect_cmd.qid), 1794 be16_to_cpu(rqst->connect_cmd.sqsize)); 1795 if (!queue) 1796 ret = VERR_QUEUE_ALLOC_FAIL; 1797 1798 /* release get taken in nvmet_fc_find_target_assoc */ 1799 nvmet_fc_tgt_a_put(iod->assoc); 1800 } 1801 } 1802 1803 if (ret) { 1804 dev_err(tgtport->dev, 1805 "Create Connection LS failed: %s\n", 1806 validation_errors[ret]); 1807 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1808 sizeof(*acc), rqst->w0.ls_cmd, 1809 (ret == VERR_NO_ASSOC) ? 
                                        FCNVME_RJT_RC_INV_ASSOC :
                                        FCNVME_RJT_RC_LOGIC,
                                FCNVME_RJT_EXP_NONE, 0);
                return;
        }

        queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
        atomic_set(&queue->connected, 1);
        queue->sqhd = 0;        /* best place to init value */

        /* format a response */

        iod->lsrsp->rsplen = sizeof(*acc);

        nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
                fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
                FCNVME_LS_CREATE_CONNECTION);
        acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
        acc->connectid.desc_len =
                fcnvme_lsdesc_len(
                        sizeof(struct fcnvme_lsdesc_conn_id));
        acc->connectid.connection_id =
                cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
                                be16_to_cpu(rqst->connect_cmd.qid)));
}

/*
 * Returns true if the LS response is to be transmitted
 * Returns false if the LS response is to be delayed
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        struct nvmet_fc_ls_iod *iod)
{
        struct fcnvme_ls_disconnect_assoc_rqst *rqst =
                        &iod->rqstbuf->rq_dis_assoc;
        struct fcnvme_ls_disconnect_assoc_acc *acc =
                        &iod->rspbuf->rsp_dis_assoc;
        struct nvmet_fc_tgt_assoc *assoc = NULL;
        struct nvmet_fc_ls_iod *oldls = NULL;
        unsigned long flags;
        int ret = 0;

        memset(acc, 0, sizeof(*acc));

        ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
        if (!ret) {
                /* match an active association - takes an assoc ref if !NULL */
                assoc = nvmet_fc_find_target_assoc(tgtport,
                                be64_to_cpu(rqst->associd.association_id));
                iod->assoc = assoc;
                if (!assoc)
                        ret = VERR_NO_ASSOC;
        }

        if (ret || !assoc) {
                dev_err(tgtport->dev,
                        "Disconnect LS failed: %s\n",
                        validation_errors[ret]);
                iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
                                sizeof(*acc), rqst->w0.ls_cmd,
                                (ret == VERR_NO_ASSOC) ?
                                        FCNVME_RJT_RC_INV_ASSOC :
                                        FCNVME_RJT_RC_LOGIC,
                                FCNVME_RJT_EXP_NONE, 0);
                return true;
        }

        /* format a response */

        iod->lsrsp->rsplen = sizeof(*acc);

        nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
                fcnvme_lsdesc_len(
                        sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
                FCNVME_LS_DISCONNECT_ASSOC);

        /*
         * The rules for LS responses say the response cannot
         * go back until ABTS's have been sent for all outstanding
         * I/O and a Disconnect Association LS has been sent.
         * So... save off the Disconnect LS to send the response
         * later. If there was a prior LS already saved, replace
         * it with the newer one and send a "can't perform" reject
         * on the older one.
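         * The saved iod is eventually transmitted from
         * nvmet_fc_target_assoc_free(), after every queue on the association
         * has been deleted, which satisfies that ordering.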
1895 */ 1896 spin_lock_irqsave(&tgtport->lock, flags); 1897 oldls = assoc->rcv_disconn; 1898 assoc->rcv_disconn = iod; 1899 spin_unlock_irqrestore(&tgtport->lock, flags); 1900 1901 if (oldls) { 1902 dev_info(tgtport->dev, 1903 "{%d:%d} Multiple Disconnect Association LS's " 1904 "received\n", 1905 tgtport->fc_target_port.port_num, assoc->a_id); 1906 /* overwrite good response with bogus failure */ 1907 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1908 sizeof(*iod->rspbuf), 1909 /* ok to use rqst, LS is same */ 1910 rqst->w0.ls_cmd, 1911 FCNVME_RJT_RC_UNAB, 1912 FCNVME_RJT_EXP_NONE, 0); 1913 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1914 } 1915 1916 nvmet_fc_schedule_delete_assoc(assoc); 1917 nvmet_fc_tgt_a_put(assoc); 1918 1919 return false; 1920 } 1921 1922 1923 /* *********************** NVME Ctrl Routines **************************** */ 1924 1925 1926 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1927 1928 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1929 1930 static void 1931 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1932 { 1933 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1934 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1935 1936 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1937 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1938 nvmet_fc_free_ls_iod(tgtport, iod); 1939 nvmet_fc_tgtport_put(tgtport); 1940 } 1941 1942 static void 1943 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1944 struct nvmet_fc_ls_iod *iod) 1945 { 1946 int ret; 1947 1948 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1949 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1950 1951 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1952 if (ret) 1953 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1954 } 1955 1956 /* 1957 * Actual processing routine for received FC-NVME LS Requests from the LLD 1958 */ 1959 static void 1960 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1961 struct nvmet_fc_ls_iod *iod) 1962 { 1963 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1964 bool sendrsp = true; 1965 1966 iod->lsrsp->nvme_fc_private = iod; 1967 iod->lsrsp->rspbuf = iod->rspbuf; 1968 iod->lsrsp->rspdma = iod->rspdma; 1969 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1970 /* Be preventative. 
handlers will later set to valid length */ 1971 iod->lsrsp->rsplen = 0; 1972 1973 iod->assoc = NULL; 1974 1975 /* 1976 * handlers: 1977 * parse request input, execute the request, and format the 1978 * LS response 1979 */ 1980 switch (w0->ls_cmd) { 1981 case FCNVME_LS_CREATE_ASSOCIATION: 1982 /* Creates Association and initial Admin Queue/Connection */ 1983 nvmet_fc_ls_create_association(tgtport, iod); 1984 break; 1985 case FCNVME_LS_CREATE_CONNECTION: 1986 /* Creates an IO Queue/Connection */ 1987 nvmet_fc_ls_create_connection(tgtport, iod); 1988 break; 1989 case FCNVME_LS_DISCONNECT_ASSOC: 1990 /* Terminate a Queue/Connection or the Association */ 1991 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1992 break; 1993 default: 1994 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1995 sizeof(*iod->rspbuf), w0->ls_cmd, 1996 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1997 } 1998 1999 if (sendrsp) 2000 nvmet_fc_xmt_ls_rsp(tgtport, iod); 2001 } 2002 2003 /* 2004 * Actual processing routine for received FC-NVME LS Requests from the LLD 2005 */ 2006 static void 2007 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 2008 { 2009 struct nvmet_fc_ls_iod *iod = 2010 container_of(work, struct nvmet_fc_ls_iod, work); 2011 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 2012 2013 nvmet_fc_handle_ls_rqst(tgtport, iod); 2014 } 2015 2016 2017 /** 2018 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 2019 * upon the reception of a NVME LS request. 2020 * 2021 * The nvmet-fc layer will copy payload to an internal structure for 2022 * processing. As such, upon completion of the routine, the LLDD may 2023 * immediately free/reuse the LS request buffer passed in the call. 2024 * 2025 * If this routine returns error, the LLDD should abort the exchange. 2026 * 2027 * @target_port: pointer to the (registered) target port the LS was 2028 * received on. 2029 * @hosthandle: pointer to the host specific data, gets stored in iod. 2030 * @lsrsp: pointer to a lsrsp structure to be used to reference 2031 * the exchange corresponding to the LS. 2032 * @lsreqbuf: pointer to the buffer containing the LS Request 2033 * @lsreqbuf_len: length, in bytes, of the received LS request 2034 */ 2035 int 2036 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2037 void *hosthandle, 2038 struct nvmefc_ls_rsp *lsrsp, 2039 void *lsreqbuf, u32 lsreqbuf_len) 2040 { 2041 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2042 struct nvmet_fc_ls_iod *iod; 2043 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2044 2045 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2046 dev_info(tgtport->dev, 2047 "RCV %s LS failed: payload too large (%d)\n", 2048 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2049 nvmefc_ls_names[w0->ls_cmd] : "", 2050 lsreqbuf_len); 2051 return -E2BIG; 2052 } 2053 2054 if (!nvmet_fc_tgtport_get(tgtport)) { 2055 dev_info(tgtport->dev, 2056 "RCV %s LS failed: target deleting\n", 2057 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2058 nvmefc_ls_names[w0->ls_cmd] : ""); 2059 return -ESHUTDOWN; 2060 } 2061 2062 iod = nvmet_fc_alloc_ls_iod(tgtport); 2063 if (!iod) { 2064 dev_info(tgtport->dev, 2065 "RCV %s LS failed: context allocation failed\n", 2066 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2067 nvmefc_ls_names[w0->ls_cmd] : ""); 2068 nvmet_fc_tgtport_put(tgtport); 2069 return -ENOENT; 2070 } 2071 2072 iod->lsrsp = lsrsp; 2073 iod->fcpreq = NULL; 2074 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2075 iod->rqstdatalen = lsreqbuf_len; 2076 iod->hosthandle = hosthandle; 2077 2078 queue_work(nvmet_wq, &iod->work); 2079 2080 return 0; 2081 } 2082 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2083 2084 2085 /* 2086 * ********************** 2087 * Start of FCP handling 2088 * ********************** 2089 */ 2090 2091 static int 2092 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2093 { 2094 struct scatterlist *sg; 2095 unsigned int nent; 2096 2097 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2098 if (!sg) 2099 goto out; 2100 2101 fod->data_sg = sg; 2102 fod->data_sg_cnt = nent; 2103 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2104 ((fod->io_dir == NVMET_FCP_WRITE) ? 2105 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2106 /* note: write from initiator perspective */ 2107 fod->next_sg = fod->data_sg; 2108 2109 return 0; 2110 2111 out: 2112 return NVME_SC_INTERNAL; 2113 } 2114 2115 static void 2116 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2117 { 2118 if (!fod->data_sg || !fod->data_sg_cnt) 2119 return; 2120 2121 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2122 ((fod->io_dir == NVMET_FCP_WRITE) ? 2123 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2124 sgl_free(fod->data_sg); 2125 fod->data_sg = NULL; 2126 fod->data_sg_cnt = 0; 2127 } 2128 2129 2130 static bool 2131 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2132 { 2133 u32 sqtail, used; 2134 2135 /* egad, this is ugly. And sqtail is just a best guess */ 2136 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2137 2138 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2139 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2140 } 2141 2142 /* 2143 * Prep RSP payload. 2144 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2145 */ 2146 static void 2147 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2148 struct nvmet_fc_fcp_iod *fod) 2149 { 2150 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2151 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2152 struct nvme_completion *cqe = &ersp->cqe; 2153 u32 *cqewd = (u32 *)cqe; 2154 bool send_ersp = false; 2155 u32 rsn, rspcnt, xfr_length; 2156 2157 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2158 xfr_length = fod->req.transfer_len; 2159 else 2160 xfr_length = fod->offset; 2161 2162 /* 2163 * check to see if we can send a 0's rsp. 2164 * Note: to send a 0's response, the NVME-FC host transport will 2165 * recreate the CQE. The host transport knows: sq id, SQHD (last 2166 * seen in an ersp), and command_id. Thus it will create a 2167 * zero-filled CQE with those known fields filled in. Transport 2168 * must send an ersp for any condition where the cqe won't match 2169 * this. 2170 * 2171 * Here are the FC-NVME mandated cases where we must send an ersp: 2172 * every N responses, where N=ersp_ratio 2173 * force fabric commands to send ersp's (not in FC-NVME but good 2174 * practice) 2175 * normal cmds: any time status is non-zero, or status is zero 2176 * but words 0 or 1 are non-zero. 
2177 * the SQ is 90% or more full 2178 * the cmd is a fused command 2179 * transferred data length not equal to cmd iu length 2180 */ 2181 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2182 if (!(rspcnt % fod->queue->ersp_ratio) || 2183 nvme_is_fabrics((struct nvme_command *) sqe) || 2184 xfr_length != fod->req.transfer_len || 2185 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2186 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2187 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2188 send_ersp = true; 2189 2190 /* re-set the fields */ 2191 fod->fcpreq->rspaddr = ersp; 2192 fod->fcpreq->rspdma = fod->rspdma; 2193 2194 if (!send_ersp) { 2195 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2196 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2197 } else { 2198 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2199 rsn = atomic_inc_return(&fod->queue->rsn); 2200 ersp->rsn = cpu_to_be32(rsn); 2201 ersp->xfrd_len = cpu_to_be32(xfr_length); 2202 fod->fcpreq->rsplen = sizeof(*ersp); 2203 } 2204 2205 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2206 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2207 } 2208 2209 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2210 2211 static void 2212 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2213 struct nvmet_fc_fcp_iod *fod) 2214 { 2215 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2216 2217 /* data no longer needed */ 2218 nvmet_fc_free_tgt_pgs(fod); 2219 2220 /* 2221 * if an ABTS was received or we issued the fcp_abort early 2222 * don't call abort routine again. 2223 */ 2224 /* no need to take lock - lock was taken earlier to get here */ 2225 if (!fod->aborted) 2226 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2227 2228 nvmet_fc_free_fcp_iod(fod->queue, fod); 2229 } 2230 2231 static void 2232 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2233 struct nvmet_fc_fcp_iod *fod) 2234 { 2235 int ret; 2236 2237 fod->fcpreq->op = NVMET_FCOP_RSP; 2238 fod->fcpreq->timeout = 0; 2239 2240 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2241 2242 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2243 if (ret) 2244 nvmet_fc_abort_op(tgtport, fod); 2245 } 2246 2247 static void 2248 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2249 struct nvmet_fc_fcp_iod *fod, u8 op) 2250 { 2251 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2252 struct scatterlist *sg = fod->next_sg; 2253 unsigned long flags; 2254 u32 remaininglen = fod->req.transfer_len - fod->offset; 2255 u32 tlen = 0; 2256 int ret; 2257 2258 fcpreq->op = op; 2259 fcpreq->offset = fod->offset; 2260 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2261 2262 /* 2263 * for next sequence: 2264 * break at a sg element boundary 2265 * attempt to keep sequence length capped at 2266 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2267 * be longer if a single sg element is larger 2268 * than that amount. This is done to avoid creating 2269 * a new sg list to use for the tgtport api. 
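 *
 * For example (illustrative numbers only): with 4 KiB sg elements and
 * the 256 KiB NVMET_FC_MAX_SEQ_LENGTH cap, the loop below stops after
 * 63 elements (252 KiB), since adding a 64th would reach the cap, or
 * earlier if tgtport->max_sg_cnt is hit first. A single sg element
 * larger than the cap is still carried by one sequence rather than
 * being split.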
2270 */ 2271 fcpreq->sg = sg; 2272 fcpreq->sg_cnt = 0; 2273 while (tlen < remaininglen && 2274 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2275 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2276 fcpreq->sg_cnt++; 2277 tlen += sg_dma_len(sg); 2278 sg = sg_next(sg); 2279 } 2280 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2281 fcpreq->sg_cnt++; 2282 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2283 sg = sg_next(sg); 2284 } 2285 if (tlen < remaininglen) 2286 fod->next_sg = sg; 2287 else 2288 fod->next_sg = NULL; 2289 2290 fcpreq->transfer_length = tlen; 2291 fcpreq->transferred_length = 0; 2292 fcpreq->fcp_error = 0; 2293 fcpreq->rsplen = 0; 2294 2295 /* 2296 * If the last READDATA request: check if LLDD supports 2297 * combined xfr with response. 2298 */ 2299 if ((op == NVMET_FCOP_READDATA) && 2300 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2301 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2302 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2303 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2304 } 2305 2306 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2307 if (ret) { 2308 /* 2309 * should be ok to set w/o lock as its in the thread of 2310 * execution (not an async timer routine) and doesn't 2311 * contend with any clearing action 2312 */ 2313 fod->abort = true; 2314 2315 if (op == NVMET_FCOP_WRITEDATA) { 2316 spin_lock_irqsave(&fod->flock, flags); 2317 fod->writedataactive = false; 2318 spin_unlock_irqrestore(&fod->flock, flags); 2319 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2320 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2321 fcpreq->fcp_error = ret; 2322 fcpreq->transferred_length = 0; 2323 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2324 } 2325 } 2326 } 2327 2328 static inline bool 2329 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2330 { 2331 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2332 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2333 2334 /* if in the middle of an io and we need to tear down */ 2335 if (abort) { 2336 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2337 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2338 return true; 2339 } 2340 2341 nvmet_fc_abort_op(tgtport, fod); 2342 return true; 2343 } 2344 2345 return false; 2346 } 2347 2348 /* 2349 * actual done handler for FCP operations when completed by the lldd 2350 */ 2351 static void 2352 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2353 { 2354 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2355 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2356 unsigned long flags; 2357 bool abort; 2358 2359 spin_lock_irqsave(&fod->flock, flags); 2360 abort = fod->abort; 2361 fod->writedataactive = false; 2362 spin_unlock_irqrestore(&fod->flock, flags); 2363 2364 switch (fcpreq->op) { 2365 2366 case NVMET_FCOP_WRITEDATA: 2367 if (__nvmet_fc_fod_op_abort(fod, abort)) 2368 return; 2369 if (fcpreq->fcp_error || 2370 fcpreq->transferred_length != fcpreq->transfer_length) { 2371 spin_lock_irqsave(&fod->flock, flags); 2372 fod->abort = true; 2373 spin_unlock_irqrestore(&fod->flock, flags); 2374 2375 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2376 return; 2377 } 2378 2379 fod->offset += fcpreq->transferred_length; 2380 if (fod->offset != fod->req.transfer_len) { 2381 spin_lock_irqsave(&fod->flock, flags); 2382 fod->writedataactive = true; 2383 spin_unlock_irqrestore(&fod->flock, flags); 2384 2385 /* transfer the next chunk */ 2386 nvmet_fc_transfer_fcp_data(tgtport, fod, 2387 NVMET_FCOP_WRITEDATA); 2388 return; 2389 } 
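		/*
		 * All write data has been pulled (offset == transfer_len);
		 * writedataactive, cleared at entry to this routine, stays
		 * false since no further WRITEDATA op will be issued for
		 * this command.
		 */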
2390 2391 /* data transfer complete, resume with nvmet layer */ 2392 fod->req.execute(&fod->req); 2393 break; 2394 2395 case NVMET_FCOP_READDATA: 2396 case NVMET_FCOP_READDATA_RSP: 2397 if (__nvmet_fc_fod_op_abort(fod, abort)) 2398 return; 2399 if (fcpreq->fcp_error || 2400 fcpreq->transferred_length != fcpreq->transfer_length) { 2401 nvmet_fc_abort_op(tgtport, fod); 2402 return; 2403 } 2404 2405 /* success */ 2406 2407 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2408 /* data no longer needed */ 2409 nvmet_fc_free_tgt_pgs(fod); 2410 nvmet_fc_free_fcp_iod(fod->queue, fod); 2411 return; 2412 } 2413 2414 fod->offset += fcpreq->transferred_length; 2415 if (fod->offset != fod->req.transfer_len) { 2416 /* transfer the next chunk */ 2417 nvmet_fc_transfer_fcp_data(tgtport, fod, 2418 NVMET_FCOP_READDATA); 2419 return; 2420 } 2421 2422 /* data transfer complete, send response */ 2423 2424 /* data no longer needed */ 2425 nvmet_fc_free_tgt_pgs(fod); 2426 2427 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2428 2429 break; 2430 2431 case NVMET_FCOP_RSP: 2432 if (__nvmet_fc_fod_op_abort(fod, abort)) 2433 return; 2434 nvmet_fc_free_fcp_iod(fod->queue, fod); 2435 break; 2436 2437 default: 2438 break; 2439 } 2440 } 2441 2442 static void 2443 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2444 { 2445 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2446 2447 nvmet_fc_fod_op_done(fod); 2448 } 2449 2450 /* 2451 * actual completion handler after execution by the nvmet layer 2452 */ 2453 static void 2454 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2455 struct nvmet_fc_fcp_iod *fod, int status) 2456 { 2457 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2458 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2459 unsigned long flags; 2460 bool abort; 2461 2462 spin_lock_irqsave(&fod->flock, flags); 2463 abort = fod->abort; 2464 spin_unlock_irqrestore(&fod->flock, flags); 2465 2466 /* if we have a CQE, snoop the last sq_head value */ 2467 if (!status) 2468 fod->queue->sqhd = cqe->sq_head; 2469 2470 if (abort) { 2471 nvmet_fc_abort_op(tgtport, fod); 2472 return; 2473 } 2474 2475 /* if an error handling the cmd post initial parsing */ 2476 if (status) { 2477 /* fudge up a failed CQE status for our transport error */ 2478 memset(cqe, 0, sizeof(*cqe)); 2479 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2480 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2481 cqe->command_id = sqe->command_id; 2482 cqe->status = cpu_to_le16(status); 2483 } else { 2484 2485 /* 2486 * try to push the data even if the SQE status is non-zero. 
2487 * There may be a status where data still was intended to 2488 * be moved 2489 */ 2490 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2491 /* push the data over before sending rsp */ 2492 nvmet_fc_transfer_fcp_data(tgtport, fod, 2493 NVMET_FCOP_READDATA); 2494 return; 2495 } 2496 2497 /* writes & no data - fall thru */ 2498 } 2499 2500 /* data no longer needed */ 2501 nvmet_fc_free_tgt_pgs(fod); 2502 2503 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2504 } 2505 2506 2507 static void 2508 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2509 { 2510 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2511 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2512 2513 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2514 } 2515 2516 2517 /* 2518 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2519 */ 2520 static void 2521 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2522 struct nvmet_fc_fcp_iod *fod) 2523 { 2524 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2525 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2526 int ret; 2527 2528 /* 2529 * Fused commands are currently not supported in the linux 2530 * implementation. 2531 * 2532 * As such, the implementation of the FC transport does not 2533 * look at the fused commands and order delivery to the upper 2534 * layer until we have both based on csn. 2535 */ 2536 2537 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2538 2539 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2540 fod->io_dir = NVMET_FCP_WRITE; 2541 if (!nvme_is_write(&cmdiu->sqe)) 2542 goto transport_error; 2543 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2544 fod->io_dir = NVMET_FCP_READ; 2545 if (nvme_is_write(&cmdiu->sqe)) 2546 goto transport_error; 2547 } else { 2548 fod->io_dir = NVMET_FCP_NODATA; 2549 if (xfrlen) 2550 goto transport_error; 2551 } 2552 2553 fod->req.cmd = &fod->cmdiubuf.sqe; 2554 fod->req.cqe = &fod->rspiubuf.cqe; 2555 if (!tgtport->pe) 2556 goto transport_error; 2557 fod->req.port = tgtport->pe->port; 2558 2559 /* clear any response payload */ 2560 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2561 2562 fod->data_sg = NULL; 2563 fod->data_sg_cnt = 0; 2564 2565 ret = nvmet_req_init(&fod->req, 2566 &fod->queue->nvme_cq, 2567 &fod->queue->nvme_sq, 2568 &nvmet_fc_tgt_fcp_ops); 2569 if (!ret) { 2570 /* bad SQE content or invalid ctrl state */ 2571 /* nvmet layer has already called op done to send rsp. */ 2572 return; 2573 } 2574 2575 fod->req.transfer_len = xfrlen; 2576 2577 /* keep a running counter of tail position */ 2578 atomic_inc(&fod->queue->sqtail); 2579 2580 if (fod->req.transfer_len) { 2581 ret = nvmet_fc_alloc_tgt_pgs(fod); 2582 if (ret) { 2583 nvmet_req_complete(&fod->req, ret); 2584 return; 2585 } 2586 } 2587 fod->req.sg = fod->data_sg; 2588 fod->req.sg_cnt = fod->data_sg_cnt; 2589 fod->offset = 0; 2590 2591 if (fod->io_dir == NVMET_FCP_WRITE) { 2592 /* pull the data over before invoking nvmet layer */ 2593 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2594 return; 2595 } 2596 2597 /* 2598 * Reads or no data: 2599 * 2600 * can invoke the nvmet_layer now. If read data, cmd completion will 2601 * push the data 2602 */ 2603 fod->req.execute(&fod->req); 2604 return; 2605 2606 transport_error: 2607 nvmet_fc_abort_op(tgtport, fod); 2608 } 2609 2610 /** 2611 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2612 * upon the reception of a NVME FCP CMD IU. 2613 * 2614 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2615 * layer for processing. 
2616 * 2617 * The nvmet_fc layer allocates a local job structure (struct 2618 * nvmet_fc_fcp_iod) from the queue for the io and copies the 2619 * CMD IU buffer to the job structure. As such, on a successful 2620 * completion (returns 0), the LLDD may immediately free/reuse 2621 * the CMD IU buffer passed in the call. 2622 * 2623 * However, FC is packetized, and the FC LLDD api may issue a hw 2624 * command to send the response yet not see the hw completion for that 2625 * command before a new command is asynchronously received. Thus it's 2626 * possible for a command to be received before the LLDD and nvmet_fc 2627 * have recycled the job structure, which gives the appearance of more 2628 * commands received than fit in the sq. 2629 * 2630 * To alleviate this scenario, a temporary queue is maintained in the 2631 * transport for pending LLDD requests waiting for a queue job structure. 2632 * In these "overrun" cases, a temporary queue element is allocated, 2633 * the LLDD request and CMD IU buffer information are remembered, and the 2634 * routine returns a -EOVERFLOW status. Subsequently, when a queue job 2635 * structure is freed, it is immediately reallocated for anything on the 2636 * pending request list. The LLDD's defer_rcv() callback is called, 2637 * informing the LLDD that it may reuse the CMD IU buffer, and the io 2638 * is then started normally with the transport. 2639 * 2640 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat 2641 * the completion as successful but must not reuse the CMD IU buffer 2642 * until the LLDD's defer_rcv() callback has been called for the 2643 * corresponding struct nvmefc_tgt_fcp_req pointer. 2644 * 2645 * If there is any other condition in which an error occurs, the 2646 * transport will return a non-zero status indicating the error. 2647 * In all cases other than -EOVERFLOW, the transport has not accepted the 2648 * request and the LLDD should abort the exchange. 2649 * 2650 * @target_port: pointer to the (registered) target port the FCP CMD IU 2651 * was received on. 2652 * @fcpreq: pointer to a fcpreq request structure to be used to reference 2653 * the exchange corresponding to the FCP Exchange. 2654 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU 2655 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU 2656 */ 2657 int 2658 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, 2659 struct nvmefc_tgt_fcp_req *fcpreq, 2660 void *cmdiubuf, u32 cmdiubuf_len) 2661 { 2662 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2663 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2664 struct nvmet_fc_tgt_queue *queue; 2665 struct nvmet_fc_fcp_iod *fod; 2666 struct nvmet_fc_defer_fcp_req *deferfcp; 2667 unsigned long flags; 2668 2669 /* validate iu, so the connection id can be used to find the queue */ 2670 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2671 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || 2672 (cmdiu->fc_id != NVME_CMD_FC_ID) || 2673 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) 2674 return -EIO; 2675 2676 queue = nvmet_fc_find_target_queue(tgtport, 2677 be64_to_cpu(cmdiu->connection_id)); 2678 if (!queue) 2679 return -ENOTCONN; 2680 2681 /* 2682 * note: reference taken by find_target_queue 2683 * After successful fod allocation, the fod will inherit the 2684 * ownership of that reference and will remove the reference 2685 * when the fod is freed.
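 * If no fod is currently available and the command is deferred
 * instead (see below), the queue reference is likewise kept; it is
 * only dropped on the error paths that abandon the command.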
2686 */ 2687 2688 spin_lock_irqsave(&queue->qlock, flags); 2689 2690 fod = nvmet_fc_alloc_fcp_iod(queue); 2691 if (fod) { 2692 spin_unlock_irqrestore(&queue->qlock, flags); 2693 2694 fcpreq->nvmet_fc_private = fod; 2695 fod->fcpreq = fcpreq; 2696 2697 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2698 2699 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2700 2701 return 0; 2702 } 2703 2704 if (!tgtport->ops->defer_rcv) { 2705 spin_unlock_irqrestore(&queue->qlock, flags); 2706 /* release the queue lookup reference */ 2707 nvmet_fc_tgt_q_put(queue); 2708 return -ENOENT; 2709 } 2710 2711 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2712 struct nvmet_fc_defer_fcp_req, req_list); 2713 if (deferfcp) { 2714 /* Just re-use one that was previously allocated */ 2715 list_del(&deferfcp->req_list); 2716 } else { 2717 spin_unlock_irqrestore(&queue->qlock, flags); 2718 2719 /* Now we need to dynamically allocate one */ 2720 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2721 if (!deferfcp) { 2722 /* release the queue lookup reference */ 2723 nvmet_fc_tgt_q_put(queue); 2724 return -ENOMEM; 2725 } 2726 spin_lock_irqsave(&queue->qlock, flags); 2727 } 2728 2729 /* For now, use rspaddr / rsplen to save payload information */ 2730 fcpreq->rspaddr = cmdiubuf; 2731 fcpreq->rsplen = cmdiubuf_len; 2732 deferfcp->fcp_req = fcpreq; 2733 2734 /* defer processing till a fod becomes available */ 2735 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2736 2737 /* NOTE: the queue lookup reference is still valid */ 2738 2739 spin_unlock_irqrestore(&queue->qlock, flags); 2740 2741 return -EOVERFLOW; 2742 } 2743 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2744 2745 /** 2746 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2747 * upon the reception of an ABTS for a FCP command 2748 * 2749 * Notify the transport that an ABTS has been received for a FCP command 2750 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2751 * LLDD believes the command is still being worked on 2752 * (template_ops->fcp_req_release() has not been called). 2753 * 2754 * The transport will wait for any outstanding work (an op to the LLDD, 2755 * which the lldd should complete with error due to the ABTS; or the 2756 * completion from the nvmet layer of the nvme command), then will 2757 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to 2758 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2759 * to the ABTS either after return from this function (assuming any 2760 * outstanding op work has been terminated) or upon the callback being 2761 * called. 2762 * 2763 * @target_port: pointer to the (registered) target port the FCP CMD IU 2764 * was received on. 2765 * @fcpreq: pointer to the fcpreq request structure that corresponds 2766 * to the exchange that received the ABTS. 2767 */ 2768 void 2769 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2770 struct nvmefc_tgt_fcp_req *fcpreq) 2771 { 2772 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2773 struct nvmet_fc_tgt_queue *queue; 2774 unsigned long flags; 2775 2776 if (!fod || fod->fcpreq != fcpreq) 2777 /* job appears to have already completed, ignore abort */ 2778 return; 2779 2780 queue = fod->queue; 2781 2782 spin_lock_irqsave(&queue->qlock, flags); 2783 if (fod->active) { 2784 /* 2785 * mark as abort. The abort handler, invoked upon completion 2786 * of any work, will detect the aborted status and do the 2787 * callback. 
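 * Both flags matter: fod->abort routes the op/command completion
 * paths into teardown, while fod->aborted tells nvmet_fc_abort_op()
 * to skip the extra fcp_abort() call since the LLDD already knows
 * about this exchange's ABTS.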
2788 */ 2789 spin_lock(&fod->flock); 2790 fod->abort = true; 2791 fod->aborted = true; 2792 spin_unlock(&fod->flock); 2793 } 2794 spin_unlock_irqrestore(&queue->qlock, flags); 2795 } 2796 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2797 2798 2799 struct nvmet_fc_traddr { 2800 u64 nn; 2801 u64 pn; 2802 }; 2803 2804 static int 2805 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2806 { 2807 u64 token64; 2808 2809 if (match_u64(sstr, &token64)) 2810 return -EINVAL; 2811 *val = token64; 2812 2813 return 0; 2814 } 2815 2816 /* 2817 * This routine validates and extracts the WWN's from the TRADDR string. 2818 * As kernel parsers need the 0x to determine number base, universally 2819 * build string to parse with 0x prefix before parsing name strings. 2820 */ 2821 static int 2822 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2823 { 2824 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2825 substring_t wwn = { name, &name[sizeof(name)-1] }; 2826 int nnoffset, pnoffset; 2827 2828 /* validate if string is one of the 2 allowed formats */ 2829 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2830 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2831 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2832 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2833 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2834 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2835 NVME_FC_TRADDR_OXNNLEN; 2836 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2837 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2838 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2839 "pn-", NVME_FC_TRADDR_NNLEN))) { 2840 nnoffset = NVME_FC_TRADDR_NNLEN; 2841 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2842 } else 2843 goto out_einval; 2844 2845 name[0] = '0'; 2846 name[1] = 'x'; 2847 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2848 2849 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2850 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2851 goto out_einval; 2852 2853 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2854 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2855 goto out_einval; 2856 2857 return 0; 2858 2859 out_einval: 2860 pr_warn("%s: bad traddr string\n", __func__); 2861 return -EINVAL; 2862 } 2863 2864 static int 2865 nvmet_fc_add_port(struct nvmet_port *port) 2866 { 2867 struct nvmet_fc_tgtport *tgtport; 2868 struct nvmet_fc_port_entry *pe; 2869 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2870 unsigned long flags; 2871 int ret; 2872 2873 /* validate the address info */ 2874 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2875 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2876 return -EINVAL; 2877 2878 /* map the traddr address info to a target port */ 2879 2880 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2881 sizeof(port->disc_addr.traddr)); 2882 if (ret) 2883 return ret; 2884 2885 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2886 if (!pe) 2887 return -ENOMEM; 2888 2889 ret = -ENXIO; 2890 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2891 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2892 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2893 (tgtport->fc_target_port.port_name == traddr.pn)) { 2894 /* a FC port can only be 1 nvmet port id */ 2895 if (!tgtport->pe) { 2896 nvmet_fc_portentry_bind(tgtport, pe, port); 2897 ret = 0; 2898 } else 2899 ret = -EALREADY; 2900 break; 2901 } 2902 } 2903 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2904 2905 if (ret) 2906 kfree(pe); 2907 2908 return ret; 2909 } 2910 2911 static void 2912 
nvmet_fc_remove_port(struct nvmet_port *port) 2913 { 2914 struct nvmet_fc_port_entry *pe = port->priv; 2915 2916 nvmet_fc_portentry_unbind(pe); 2917 2918 /* terminate any outstanding associations */ 2919 __nvmet_fc_free_assocs(pe->tgtport); 2920 2921 kfree(pe); 2922 } 2923 2924 static void 2925 nvmet_fc_discovery_chg(struct nvmet_port *port) 2926 { 2927 struct nvmet_fc_port_entry *pe = port->priv; 2928 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2929 2930 if (tgtport && tgtport->ops->discovery_event) 2931 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2932 } 2933 2934 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2935 .owner = THIS_MODULE, 2936 .type = NVMF_TRTYPE_FC, 2937 .msdbd = 1, 2938 .add_port = nvmet_fc_add_port, 2939 .remove_port = nvmet_fc_remove_port, 2940 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2941 .delete_ctrl = nvmet_fc_delete_ctrl, 2942 .discovery_chg = nvmet_fc_discovery_chg, 2943 }; 2944 2945 static int __init nvmet_fc_init_module(void) 2946 { 2947 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2948 } 2949 2950 static void __exit nvmet_fc_exit_module(void) 2951 { 2952 /* ensure any shutdown operations, e.g. delete ctrls, have finished */ 2953 flush_workqueue(nvmet_wq); 2954 2955 /* sanity check - all targetports should be removed */ 2956 if (!list_empty(&nvmet_fc_target_list)) 2957 pr_warn("%s: targetport list not empty\n", __func__); 2958 2959 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2960 2961 ida_destroy(&nvmet_fc_tgtport_cnt); 2962 } 2963 2964 module_init(nvmet_fc_init_module); 2965 module_exit(nvmet_fc_exit_module); 2966 2967 MODULE_DESCRIPTION("NVMe target FC transport driver"); 2968 MODULE_LICENSE("GPL v2"); 2969
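
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * hypothetical LLDD might feed received frames into the two transport
 * entry points above. All "example_*" names, the lport structure, and
 * the deferred-buffer bookkeeping are assumptions made for illustration;
 * a real LLDD has its own hardware-specific receive path and must follow
 * the rules documented at nvmet_fc_rcv_ls_req() and nvmet_fc_rcv_fcp_req().
 */
#if 0
struct example_lldd_port {
	struct nvmet_fc_target_port	*targetport;	/* from registration */
};

struct example_lldd_io {
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
	bool				cmdiu_buf_in_use; /* held until defer_rcv() */
};

/* LS frame received from the wire */
static void
example_lldd_recv_ls(struct example_lldd_port *lport, void *rport_handle,
		struct nvmefc_ls_rsp *lsrsp, void *rqstbuf, u32 rqstlen)
{
	/* transport copies rqstbuf internally; buffer may be reused on return */
	if (nvmet_fc_rcv_ls_req(lport->targetport, rport_handle,
				lsrsp, rqstbuf, rqstlen)) {
		/* non-zero return: the LLDD should abort the LS exchange */
	}
}

/* FCP CMD IU received from the wire */
static void
example_lldd_recv_fcp_cmd(struct example_lldd_port *lport,
		struct example_lldd_io *io, void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(lport->targetport, &io->tgt_fcp_req,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* accepted: CMD IU buffer may be reused immediately */
		break;
	case -EOVERFLOW:
		/*
		 * deferred: treat as success, but keep the CMD IU buffer
		 * untouched until the .defer_rcv() template callback fires
		 * for this tgt_fcp_req.
		 */
		io->cmdiu_buf_in_use = true;
		break;
	default:
		/* not accepted: abort the exchange on the wire */
		break;
	}
}
#endif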