// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp *lsrsp;
	struct nvmefc_tgt_fcp_req *fcpreq;	/* only if RS */

	struct list_head ls_rcv_list;	/* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_tgt_assoc *assoc;
	void *hosthandle;

	union nvmefc_ls_requests *rqstbuf;
	union nvmefc_ls_responses *rspbuf;
	u16 rqstdatalen;
	dma_addr_t rspdma;

	struct scatterlist sg[2];

	struct work_struct work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req ls_req;

	struct nvmet_fc_tgtport *tgtport;
	void *hosthandle;

	int ls_error;
	struct list_head lsreq_list;	/* tgtport->ls_req_list */
	bool req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req *fcpreq;

	struct nvme_fc_cmd_iu cmdiubuf;
	struct nvme_fc_ersp_iu rspiubuf;
	dma_addr_t rspdma;
	struct scatterlist *next_sg;
	struct scatterlist *data_sg;
	int data_sg_cnt;
	u32 offset;
	enum nvmet_fcp_datadir io_dir;
	bool active;
	bool abort;
	bool aborted;
	bool writedataactive;
	spinlock_t flock;

	struct nvmet_req req;
	struct work_struct defer_work;

	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_tgt_queue *queue;

	struct list_head fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port fc_target_port;

	struct list_head tgt_list;	/* nvmet_fc_target_list */
	struct device *dev;		/* dev for dma mapping */
	struct nvmet_fc_target_template *ops;

	struct nvmet_fc_ls_iod *iod;
	spinlock_t lock;
	struct list_head ls_rcv_list;
	struct list_head ls_req_list;
	struct list_head ls_busylist;
	struct list_head assoc_list;
	struct list_head host_list;
	struct ida assoc_cnt;
	struct nvmet_fc_port_entry *pe;
	struct kref ref;
	u32 max_sg_cnt;

	struct work_struct put_work;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_port *port;
	u64 node_name;
	u64 port_name;
	struct list_head pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head req_list;
	struct nvmefc_tgt_fcp_req *fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool ninetypercent;
	u16 qid;
	u16 sqsize;
	u16 ersp_ratio;
	__le16 sqhd;
	atomic_t connected;
	atomic_t sqtail;
	atomic_t zrspcnt;
	atomic_t rsn;
	spinlock_t qlock;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;
	struct nvmet_fc_tgt_assoc *assoc;
	struct list_head fod_list;
	struct list_head pending_cmd_list;
	struct list_head avail_defer_list;
	struct workqueue_struct *work_q;
	struct kref ref;
	/* array of fcp_iods */
	struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport *tgtport;
	void *hosthandle;
	struct list_head host_list;
	struct kref ref;
	u8 invalid;
};

struct nvmet_fc_tgt_assoc {
	u64 association_id;
	u32 a_id;
	atomic_t terminating;
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_hostport *hostport;
	struct nvmet_fc_ls_iod *rcv_disconn;
	struct list_head a_list;
	struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
	struct kref ref;
	struct work_struct del_work;
};

/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(work, struct nvmet_fc_tgtport, put_work);

	nvmet_fc_tgtport_put(tgtport);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);
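
/*
 * Illustrative sketch (not part of the driver build): how the ID helpers
 * above compose and decompose a Connection ID. Assumes a hypothetical
 * assoc whose association_id was already generated with the low 16 bits
 * clear, per the layout described above the helpers.
 */
#if 0
static void nvmet_fc_connid_example(struct nvmet_fc_tgt_assoc *assoc)
{
	u64 connid = nvmet_fc_makeconnid(assoc, 3);	/* io queue, qid 3 */

	/* upper 6 bytes are the association, lower 2 bytes are the qid */
	WARN_ON(nvmet_fc_getassociationid(connid) != assoc->association_id);
	WARN_ON(nvmet_fc_getqueueid(connid) != 3);

	/* queue 0 (admin): Connection ID == Association ID */
	WARN_ON(nvmet_fc_makeconnid(assoc, 0) != assoc->association_id);
}
#endif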

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
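
/*
 * Illustrative sketch (not part of the driver build): behavior of the
 * wrappers above when the LLDD registered with a NULL struct device, as
 * fcloop does. The function name below is hypothetical.
 */
#if 0
static void nvmet_fc_dma_wrapper_example(struct nvmet_fc_ls_iod *iod)
{
	/* NULL dev: no real mapping is set up, a 0 "address" comes back */
	dma_addr_t dma = fc_dma_map_single(NULL, iod->rspbuf,
					sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	/* ... and the error check, sync and unmap calls become no-ops */
	WARN_ON(fc_dma_mapping_error(NULL, dma));
	fc_dma_sync_single_for_device(NULL, dma, sizeof(*iod->rspbuf),
					DMA_TO_DEVICE);
	fc_dma_unmap_single(NULL, dma, sizeof(*iod->rspbuf), DMA_TO_DEVICE);
}
#endif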

/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		goto out_putwork;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);

out_putwork:
	queue_work(nvmet_wq, &tgtport->put_work);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);

}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
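
/*
 * Illustrative sketch (not part of the driver build): the queue allocation
 * below sizes one contiguous buffer for the queue header plus its trailing
 * flexible array of fcp_iods. The function name and sqsize value here are
 * hypothetical.
 */
#if 0
static struct nvmet_fc_tgt_queue *nvmet_fc_queue_alloc_example(void)
{
	u16 sqsize = 32;	/* hypothetical sq size */
	struct nvmet_fc_tgt_queue *q;

	/* struct_size() == sizeof(*q) + sqsize * sizeof(q->fod[0]) */
	q = kzalloc(struct_size(q, fod, sqsize), GFP_KERNEL);
	if (q)
		q->sqsize = sqsize;
	return q;
}
#endif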

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_free_queue;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	assoc->queues[qid] = queue;

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			rcu_read_unlock();
			return queue;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *host;

	lockdep_assert_held(&tgtport->lock);

	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host))
				return host;
		}
	}

	return NULL;
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match = NULL;
	unsigned long flags;

	/*
	 * Caller holds a reference on tgtport.
	 */

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (match)
		return match;

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	if (match) {
		/* new allocation not needed */
		kfree(newhost);
		newhost = match;
	} else {
		nvmet_fc_tgtport_get(tgtport);
		newhost->tgtport = tgtport;
		newhost->hosthandle = hosthandle;
		INIT_LIST_HEAD(&newhost->host_list);
		kref_init(&newhost->ref);

		list_add_tail(&newhost->host_list, &tgtport->host_list);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return newhost;
}

static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	nvmet_fc_tgtport_get(assoc->tgtport);
	if (!queue_work(nvmet_wq, &assoc->del_work))
		nvmet_fc_tgtport_put(assoc->tgtport);
}

static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
	struct nvmet_fc_tgt_assoc *a;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
		if (association_id == a->association_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc;
	unsigned long flags;
	bool done;
	u64 ran;
	int idx;

	if (!tgtport->pe)
		return NULL;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_ida;

	assoc->tgtport = tgtport;
	nvmet_fc_tgtport_get(tgtport);
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
	atomic_set(&assoc->terminating, 0);

	done = false;
	do {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		if (!nvmet_fc_assoc_exists(tgtport, ran)) {
			assoc->association_id = ran;
			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
			done = true;
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	} while (!done);

	return assoc;

out_ida:
	ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
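
/*
 * Illustrative sketch (not part of the driver build): the allocate-then-
 * recheck idiom used by nvmet_fc_alloc_hostport() above. The (sleeping)
 * allocation happens outside tgtport->lock, the list is re-searched under
 * the lock, and the new object is discarded if another thread won the race.
 * Names below are hypothetical; the real function also takes a tgtport
 * reference and links the new hostport while still holding the lock.
 */
#if 0
static struct nvmet_fc_hostport *
nvmet_fc_hostport_race_example(struct nvmet_fc_tgtport *tgtport,
			void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match;
	unsigned long flags;

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);	/* may sleep */
	if (!newhost)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	if (match)
		kfree(newhost);		/* lost the race: use the winner */
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return match ? match : newhost;	/* caller finishes newhost init */
}
#endif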

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;
	int i;

	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			nvmet_fc_delete_target_queue(assoc->queues[i]);
	}

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	nvmet_fc_hostport_put(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_free(&tgtport->assoc_cnt, assoc->a_id);
	dev_info(tgtport->dev,
		"{%d:%d} Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;
	int i, terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	synchronize_rcu();

	/* ensure all in-flight I/Os have been processed */
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			flush_workqueue(assoc->queues[i]->work_q);
	}

	dev_info(tgtport->dev,
		"{%d:%d} Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);

	nvmet_fc_tgtport_put(tgtport);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;
	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
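
/*
 * Illustrative sketch (not part of the driver build): how an LLDD might
 * register a targetport with the transport. The example_* callbacks, the
 * private struct and the WWN/port_id values are hypothetical; only the ops
 * checked as mandatory by nvmet_fc_register_targetport() above are shown.
 */
#if 0
static struct nvmet_fc_target_template example_lldd_tgt_template = {
	.targetport_delete	= example_targetport_delete,
	.xmt_ls_rsp		= example_xmt_ls_rsp,
	.fcp_op			= example_fcp_op,
	.fcp_abort		= example_fcp_abort,
	.fcp_req_release	= example_fcp_req_release,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_priv_sz		= sizeof(struct example_lldd_port_priv),
};

static int example_lldd_register(struct device *dev)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= 0x200000109b123456ULL, /* hypothetical WWNN */
		.port_name	= 0x100000109b123456ULL, /* hypothetical WWPN */
		.port_id	= 0x010203,		 /* hypothetical D_ID */
	};
	struct nvmet_fc_target_port *tgtport;

	return nvmet_fc_register_targetport(&pinfo, &example_lldd_tgt_template,
					dev, &tgtport);
}
#endif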

static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_free(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	rcu_read_unlock();
}

/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL).  The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *              LS was received on and which supplied the transport the
 *              hosthandle.
 * @hosthandle: the handle (pointer) that represents the host port
 *              that no longer has connectivity and that LS's should
 *              no longer be directed to.
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		assoc->hostport->invalid = 1;
		noassoc = false;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		rcu_read_lock();
		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		rcu_read_unlock();

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_schedule_delete_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered a local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	flush_workqueue(nvmet_wq);

	/*
	 * should terminate LS's as well. However, LS's will be generated
	 * at the tail end of association termination, so they likely don't
	 * exist yet. And even if they did, it's worthwhile to just let
	 * them finish and targetport ref counting will clean things up.
	 */

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
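
/*
 * Illustrative sketch (not part of the driver build): the teardown calls an
 * LLDD is expected to make. nvmet_fc_invalidate_host() is used when
 * connectivity to a single remote host is lost; nvmet_fc_unregister_targetport()
 * when the local port itself goes away. The example_* names are hypothetical.
 */
#if 0
static void example_lldd_remote_port_gone(struct nvmet_fc_target_port *tgtport,
			void *hosthandle)
{
	/*
	 * drop transport references to the host; host_release() will be
	 * called back once any associations using it are torn down.
	 */
	nvmet_fc_invalidate_host(tgtport, hosthandle);
}

static void example_lldd_local_port_gone(struct nvmet_fc_target_port *tgtport)
{
	/* terminates associations, then drops the registration reference */
	WARN_ON(nvmet_fc_unregister_targetport(tgtport));
}
#endif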

/* ********************** FC-NVME LS RCV Handling ************************* */


static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * was incorrect.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
		  be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue) {
				ret = VERR_QUEUE_ALLOC_FAIL;
				nvmet_fc_tgt_a_put(iod->assoc);
			}
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	dev_info(tgtport->dev,
		"{%d:%d} Association created\n",
		tgtport->fc_target_port.port_num, iod->assoc->a_id);

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
		  be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

/*
 * Returns true if the LS response is to be transmitted
 * Returns false if the LS response is to be delayed
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&iod->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&iod->rspbuf->rsp_dis_assoc;
	struct nvmet_fc_tgt_assoc *assoc = NULL;
	struct nvmet_fc_ls_iod *oldls = NULL;
	unsigned long flags;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association - takes an assoc ref if !NULL */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret || !assoc) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * The rules for LS response says the response cannot
	 * go back until ABTS's have been sent for all outstanding
	 * I/O and a Disconnect Association LS has been sent.
	 * So... save off the Disconnect LS to send the response
	 * later. If there was a prior LS already saved, replace
	 * it with the newer one and send a can't perform reject
	 * on the older one.
	 */
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	assoc->rcv_disconn = iod;
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (oldls) {
		dev_info(tgtport->dev,
			"{%d:%d} Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*iod->rspbuf),
						/* ok to use rqst, LS is same */
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	}

	nvmet_fc_schedule_delete_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);

	return false;
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
	bool sendrsp = true;

	iod->lsrsp->nvme_fc_private = iod;
	iod->lsrsp->rspbuf = iod->rspbuf;
	iod->lsrsp->rspdma = iod->rspdma;
	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
handlers will later set to valid length */ 1940 iod->lsrsp->rsplen = 0; 1941 1942 iod->assoc = NULL; 1943 1944 /* 1945 * handlers: 1946 * parse request input, execute the request, and format the 1947 * LS response 1948 */ 1949 switch (w0->ls_cmd) { 1950 case FCNVME_LS_CREATE_ASSOCIATION: 1951 /* Creates Association and initial Admin Queue/Connection */ 1952 nvmet_fc_ls_create_association(tgtport, iod); 1953 break; 1954 case FCNVME_LS_CREATE_CONNECTION: 1955 /* Creates an IO Queue/Connection */ 1956 nvmet_fc_ls_create_connection(tgtport, iod); 1957 break; 1958 case FCNVME_LS_DISCONNECT_ASSOC: 1959 /* Terminate a Queue/Connection or the Association */ 1960 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 1961 break; 1962 default: 1963 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 1964 sizeof(*iod->rspbuf), w0->ls_cmd, 1965 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1966 } 1967 1968 if (sendrsp) 1969 nvmet_fc_xmt_ls_rsp(tgtport, iod); 1970 } 1971 1972 /* 1973 * Actual processing routine for received FC-NVME LS Requests from the LLD 1974 */ 1975 static void 1976 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 1977 { 1978 struct nvmet_fc_ls_iod *iod = 1979 container_of(work, struct nvmet_fc_ls_iod, work); 1980 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1981 1982 nvmet_fc_handle_ls_rqst(tgtport, iod); 1983 } 1984 1985 1986 /** 1987 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 1988 * upon the reception of a NVME LS request. 1989 * 1990 * The nvmet-fc layer will copy payload to an internal structure for 1991 * processing. As such, upon completion of the routine, the LLDD may 1992 * immediately free/reuse the LS request buffer passed in the call. 1993 * 1994 * If this routine returns error, the LLDD should abort the exchange. 1995 * 1996 * @target_port: pointer to the (registered) target port the LS was 1997 * received on. 1998 * @hosthandle: pointer to the host specific data, gets stored in iod. 1999 * @lsrsp: pointer to a lsrsp structure to be used to reference 2000 * the exchange corresponding to the LS. 2001 * @lsreqbuf: pointer to the buffer containing the LS Request 2002 * @lsreqbuf_len: length, in bytes, of the received LS request 2003 */ 2004 int 2005 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2006 void *hosthandle, 2007 struct nvmefc_ls_rsp *lsrsp, 2008 void *lsreqbuf, u32 lsreqbuf_len) 2009 { 2010 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2011 struct nvmet_fc_ls_iod *iod; 2012 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2013 2014 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2015 dev_info(tgtport->dev, 2016 "RCV %s LS failed: payload too large (%d)\n", 2017 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2018 nvmefc_ls_names[w0->ls_cmd] : "", 2019 lsreqbuf_len); 2020 return -E2BIG; 2021 } 2022 2023 if (!nvmet_fc_tgtport_get(tgtport)) { 2024 dev_info(tgtport->dev, 2025 "RCV %s LS failed: target deleting\n", 2026 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2027 nvmefc_ls_names[w0->ls_cmd] : ""); 2028 return -ESHUTDOWN; 2029 } 2030 2031 iod = nvmet_fc_alloc_ls_iod(tgtport); 2032 if (!iod) { 2033 dev_info(tgtport->dev, 2034 "RCV %s LS failed: context allocation failed\n", 2035 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2036 nvmefc_ls_names[w0->ls_cmd] : ""); 2037 nvmet_fc_tgtport_put(tgtport); 2038 return -ENOENT; 2039 } 2040 2041 iod->lsrsp = lsrsp; 2042 iod->fcpreq = NULL; 2043 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2044 iod->rqstdatalen = lsreqbuf_len; 2045 iod->hosthandle = hosthandle; 2046 2047 queue_work(nvmet_wq, &iod->work); 2048 2049 return 0; 2050 } 2051 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2052 2053 2054 /* 2055 * ********************** 2056 * Start of FCP handling 2057 * ********************** 2058 */ 2059 2060 static int 2061 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2062 { 2063 struct scatterlist *sg; 2064 unsigned int nent; 2065 2066 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2067 if (!sg) 2068 goto out; 2069 2070 fod->data_sg = sg; 2071 fod->data_sg_cnt = nent; 2072 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2073 ((fod->io_dir == NVMET_FCP_WRITE) ? 2074 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2075 /* note: write from initiator perspective */ 2076 fod->next_sg = fod->data_sg; 2077 2078 return 0; 2079 2080 out: 2081 return NVME_SC_INTERNAL; 2082 } 2083 2084 static void 2085 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2086 { 2087 if (!fod->data_sg || !fod->data_sg_cnt) 2088 return; 2089 2090 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2091 ((fod->io_dir == NVMET_FCP_WRITE) ? 2092 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2093 sgl_free(fod->data_sg); 2094 fod->data_sg = NULL; 2095 fod->data_sg_cnt = 0; 2096 } 2097 2098 2099 static bool 2100 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2101 { 2102 u32 sqtail, used; 2103 2104 /* egad, this is ugly. And sqtail is just a best guess */ 2105 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2106 2107 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2108 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2109 } 2110 2111 /* 2112 * Prep RSP payload. 2113 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2114 */ 2115 static void 2116 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2117 struct nvmet_fc_fcp_iod *fod) 2118 { 2119 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2120 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2121 struct nvme_completion *cqe = &ersp->cqe; 2122 u32 *cqewd = (u32 *)cqe; 2123 bool send_ersp = false; 2124 u32 rsn, rspcnt, xfr_length; 2125 2126 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2127 xfr_length = fod->req.transfer_len; 2128 else 2129 xfr_length = fod->offset; 2130 2131 /* 2132 * check to see if we can send a 0's rsp. 2133 * Note: to send a 0's response, the NVME-FC host transport will 2134 * recreate the CQE. The host transport knows: sq id, SQHD (last 2135 * seen in an ersp), and command_id. Thus it will create a 2136 * zero-filled CQE with those known fields filled in. Transport 2137 * must send an ersp for any condition where the cqe won't match 2138 * this. 2139 * 2140 * Here are the FC-NVME mandated cases where we must send an ersp: 2141 * every N responses, where N=ersp_ratio 2142 * force fabric commands to send ersp's (not in FC-NVME but good 2143 * practice) 2144 * normal cmds: any time status is non-zero, or status is zero 2145 * but words 0 or 1 are non-zero. 
2146 * the SQ is 90% or more full 2147 * the cmd is a fused command 2148 * transferred data length not equal to cmd iu length 2149 */ 2150 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2151 if (!(rspcnt % fod->queue->ersp_ratio) || 2152 nvme_is_fabrics((struct nvme_command *) sqe) || 2153 xfr_length != fod->req.transfer_len || 2154 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2155 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2156 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2157 send_ersp = true; 2158 2159 /* re-set the fields */ 2160 fod->fcpreq->rspaddr = ersp; 2161 fod->fcpreq->rspdma = fod->rspdma; 2162 2163 if (!send_ersp) { 2164 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2165 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2166 } else { 2167 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2168 rsn = atomic_inc_return(&fod->queue->rsn); 2169 ersp->rsn = cpu_to_be32(rsn); 2170 ersp->xfrd_len = cpu_to_be32(xfr_length); 2171 fod->fcpreq->rsplen = sizeof(*ersp); 2172 } 2173 2174 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2175 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2176 } 2177 2178 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2179 2180 static void 2181 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2182 struct nvmet_fc_fcp_iod *fod) 2183 { 2184 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2185 2186 /* data no longer needed */ 2187 nvmet_fc_free_tgt_pgs(fod); 2188 2189 /* 2190 * if an ABTS was received or we issued the fcp_abort early 2191 * don't call abort routine again. 2192 */ 2193 /* no need to take lock - lock was taken earlier to get here */ 2194 if (!fod->aborted) 2195 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2196 2197 nvmet_fc_free_fcp_iod(fod->queue, fod); 2198 } 2199 2200 static void 2201 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2202 struct nvmet_fc_fcp_iod *fod) 2203 { 2204 int ret; 2205 2206 fod->fcpreq->op = NVMET_FCOP_RSP; 2207 fod->fcpreq->timeout = 0; 2208 2209 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2210 2211 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2212 if (ret) 2213 nvmet_fc_abort_op(tgtport, fod); 2214 } 2215 2216 static void 2217 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2218 struct nvmet_fc_fcp_iod *fod, u8 op) 2219 { 2220 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2221 struct scatterlist *sg = fod->next_sg; 2222 unsigned long flags; 2223 u32 remaininglen = fod->req.transfer_len - fod->offset; 2224 u32 tlen = 0; 2225 int ret; 2226 2227 fcpreq->op = op; 2228 fcpreq->offset = fod->offset; 2229 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2230 2231 /* 2232 * for next sequence: 2233 * break at a sg element boundary 2234 * attempt to keep sequence length capped at 2235 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2236 * be longer if a single sg element is larger 2237 * than that amount. This is done to avoid creating 2238 * a new sg list to use for the tgtport api. 
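	 *
	 * Illustrative sizing note (not normative): with typical
	 * page-sized sg elements and a sufficiently large max_sg_cnt,
	 * each sequence carries roughly NVMET_FC_MAX_SEQ_LENGTH
	 * (256 KiB), so e.g. a 1 MiB transfer moves in about four
	 * sequences; a single sg element larger than the cap still
	 * yields one (longer) sequence rather than being split.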
2239 */ 2240 fcpreq->sg = sg; 2241 fcpreq->sg_cnt = 0; 2242 while (tlen < remaininglen && 2243 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2244 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2245 fcpreq->sg_cnt++; 2246 tlen += sg_dma_len(sg); 2247 sg = sg_next(sg); 2248 } 2249 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2250 fcpreq->sg_cnt++; 2251 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2252 sg = sg_next(sg); 2253 } 2254 if (tlen < remaininglen) 2255 fod->next_sg = sg; 2256 else 2257 fod->next_sg = NULL; 2258 2259 fcpreq->transfer_length = tlen; 2260 fcpreq->transferred_length = 0; 2261 fcpreq->fcp_error = 0; 2262 fcpreq->rsplen = 0; 2263 2264 /* 2265 * If the last READDATA request: check if LLDD supports 2266 * combined xfr with response. 2267 */ 2268 if ((op == NVMET_FCOP_READDATA) && 2269 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2270 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2271 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2272 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2273 } 2274 2275 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2276 if (ret) { 2277 /* 2278 * should be ok to set w/o lock as its in the thread of 2279 * execution (not an async timer routine) and doesn't 2280 * contend with any clearing action 2281 */ 2282 fod->abort = true; 2283 2284 if (op == NVMET_FCOP_WRITEDATA) { 2285 spin_lock_irqsave(&fod->flock, flags); 2286 fod->writedataactive = false; 2287 spin_unlock_irqrestore(&fod->flock, flags); 2288 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2289 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2290 fcpreq->fcp_error = ret; 2291 fcpreq->transferred_length = 0; 2292 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2293 } 2294 } 2295 } 2296 2297 static inline bool 2298 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2299 { 2300 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2301 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2302 2303 /* if in the middle of an io and we need to tear down */ 2304 if (abort) { 2305 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2306 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2307 return true; 2308 } 2309 2310 nvmet_fc_abort_op(tgtport, fod); 2311 return true; 2312 } 2313 2314 return false; 2315 } 2316 2317 /* 2318 * actual done handler for FCP operations when completed by the lldd 2319 */ 2320 static void 2321 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2322 { 2323 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2324 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2325 unsigned long flags; 2326 bool abort; 2327 2328 spin_lock_irqsave(&fod->flock, flags); 2329 abort = fod->abort; 2330 fod->writedataactive = false; 2331 spin_unlock_irqrestore(&fod->flock, flags); 2332 2333 switch (fcpreq->op) { 2334 2335 case NVMET_FCOP_WRITEDATA: 2336 if (__nvmet_fc_fod_op_abort(fod, abort)) 2337 return; 2338 if (fcpreq->fcp_error || 2339 fcpreq->transferred_length != fcpreq->transfer_length) { 2340 spin_lock_irqsave(&fod->flock, flags); 2341 fod->abort = true; 2342 spin_unlock_irqrestore(&fod->flock, flags); 2343 2344 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2345 return; 2346 } 2347 2348 fod->offset += fcpreq->transferred_length; 2349 if (fod->offset != fod->req.transfer_len) { 2350 spin_lock_irqsave(&fod->flock, flags); 2351 fod->writedataactive = true; 2352 spin_unlock_irqrestore(&fod->flock, flags); 2353 2354 /* transfer the next chunk */ 2355 nvmet_fc_transfer_fcp_data(tgtport, fod, 2356 NVMET_FCOP_WRITEDATA); 2357 return; 2358 } 
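		/*
		 * Note: fod->offset was advanced by transferred_length
		 * above, and one WRITEDATA op is issued per sequence;
		 * this completion path therefore repeats until the full
		 * transfer_len has been pulled from the host, at which
		 * point the command is handed to the nvmet layer below.
		 */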
2359 2360 /* data transfer complete, resume with nvmet layer */ 2361 fod->req.execute(&fod->req); 2362 break; 2363 2364 case NVMET_FCOP_READDATA: 2365 case NVMET_FCOP_READDATA_RSP: 2366 if (__nvmet_fc_fod_op_abort(fod, abort)) 2367 return; 2368 if (fcpreq->fcp_error || 2369 fcpreq->transferred_length != fcpreq->transfer_length) { 2370 nvmet_fc_abort_op(tgtport, fod); 2371 return; 2372 } 2373 2374 /* success */ 2375 2376 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2377 /* data no longer needed */ 2378 nvmet_fc_free_tgt_pgs(fod); 2379 nvmet_fc_free_fcp_iod(fod->queue, fod); 2380 return; 2381 } 2382 2383 fod->offset += fcpreq->transferred_length; 2384 if (fod->offset != fod->req.transfer_len) { 2385 /* transfer the next chunk */ 2386 nvmet_fc_transfer_fcp_data(tgtport, fod, 2387 NVMET_FCOP_READDATA); 2388 return; 2389 } 2390 2391 /* data transfer complete, send response */ 2392 2393 /* data no longer needed */ 2394 nvmet_fc_free_tgt_pgs(fod); 2395 2396 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2397 2398 break; 2399 2400 case NVMET_FCOP_RSP: 2401 if (__nvmet_fc_fod_op_abort(fod, abort)) 2402 return; 2403 nvmet_fc_free_fcp_iod(fod->queue, fod); 2404 break; 2405 2406 default: 2407 break; 2408 } 2409 } 2410 2411 static void 2412 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2413 { 2414 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2415 2416 nvmet_fc_fod_op_done(fod); 2417 } 2418 2419 /* 2420 * actual completion handler after execution by the nvmet layer 2421 */ 2422 static void 2423 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2424 struct nvmet_fc_fcp_iod *fod, int status) 2425 { 2426 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2427 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2428 unsigned long flags; 2429 bool abort; 2430 2431 spin_lock_irqsave(&fod->flock, flags); 2432 abort = fod->abort; 2433 spin_unlock_irqrestore(&fod->flock, flags); 2434 2435 /* if we have a CQE, snoop the last sq_head value */ 2436 if (!status) 2437 fod->queue->sqhd = cqe->sq_head; 2438 2439 if (abort) { 2440 nvmet_fc_abort_op(tgtport, fod); 2441 return; 2442 } 2443 2444 /* if an error handling the cmd post initial parsing */ 2445 if (status) { 2446 /* fudge up a failed CQE status for our transport error */ 2447 memset(cqe, 0, sizeof(*cqe)); 2448 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2449 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2450 cqe->command_id = sqe->command_id; 2451 cqe->status = cpu_to_le16(status); 2452 } else { 2453 2454 /* 2455 * try to push the data even if the SQE status is non-zero. 
2456 * There may be a status where data still was intended to 2457 * be moved 2458 */ 2459 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2460 /* push the data over before sending rsp */ 2461 nvmet_fc_transfer_fcp_data(tgtport, fod, 2462 NVMET_FCOP_READDATA); 2463 return; 2464 } 2465 2466 /* writes & no data - fall thru */ 2467 } 2468 2469 /* data no longer needed */ 2470 nvmet_fc_free_tgt_pgs(fod); 2471 2472 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2473 } 2474 2475 2476 static void 2477 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2478 { 2479 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2480 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2481 2482 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2483 } 2484 2485 2486 /* 2487 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2488 */ 2489 static void 2490 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2491 struct nvmet_fc_fcp_iod *fod) 2492 { 2493 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2494 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2495 int ret; 2496 2497 /* 2498 * Fused commands are currently not supported in the linux 2499 * implementation. 2500 * 2501 * As such, the implementation of the FC transport does not 2502 * look at the fused commands and order delivery to the upper 2503 * layer until we have both based on csn. 2504 */ 2505 2506 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2507 2508 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2509 fod->io_dir = NVMET_FCP_WRITE; 2510 if (!nvme_is_write(&cmdiu->sqe)) 2511 goto transport_error; 2512 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2513 fod->io_dir = NVMET_FCP_READ; 2514 if (nvme_is_write(&cmdiu->sqe)) 2515 goto transport_error; 2516 } else { 2517 fod->io_dir = NVMET_FCP_NODATA; 2518 if (xfrlen) 2519 goto transport_error; 2520 } 2521 2522 fod->req.cmd = &fod->cmdiubuf.sqe; 2523 fod->req.cqe = &fod->rspiubuf.cqe; 2524 if (!tgtport->pe) 2525 goto transport_error; 2526 fod->req.port = tgtport->pe->port; 2527 2528 /* clear any response payload */ 2529 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2530 2531 fod->data_sg = NULL; 2532 fod->data_sg_cnt = 0; 2533 2534 ret = nvmet_req_init(&fod->req, 2535 &fod->queue->nvme_cq, 2536 &fod->queue->nvme_sq, 2537 &nvmet_fc_tgt_fcp_ops); 2538 if (!ret) { 2539 /* bad SQE content or invalid ctrl state */ 2540 /* nvmet layer has already called op done to send rsp. */ 2541 return; 2542 } 2543 2544 fod->req.transfer_len = xfrlen; 2545 2546 /* keep a running counter of tail position */ 2547 atomic_inc(&fod->queue->sqtail); 2548 2549 if (fod->req.transfer_len) { 2550 ret = nvmet_fc_alloc_tgt_pgs(fod); 2551 if (ret) { 2552 nvmet_req_complete(&fod->req, ret); 2553 return; 2554 } 2555 } 2556 fod->req.sg = fod->data_sg; 2557 fod->req.sg_cnt = fod->data_sg_cnt; 2558 fod->offset = 0; 2559 2560 if (fod->io_dir == NVMET_FCP_WRITE) { 2561 /* pull the data over before invoking nvmet layer */ 2562 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2563 return; 2564 } 2565 2566 /* 2567 * Reads or no data: 2568 * 2569 * can invoke the nvmet_layer now. If read data, cmd completion will 2570 * push the data 2571 */ 2572 fod->req.execute(&fod->req); 2573 return; 2574 2575 transport_error: 2576 nvmet_fc_abort_op(tgtport, fod); 2577 } 2578 2579 /** 2580 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2581 * upon the reception of a NVME FCP CMD IU. 2582 * 2583 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2584 * layer for processing. 
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD - which may issue a hw command to send
 * the response yet not see the hw completion for that command, and so
 * not upcall the nvmet_fc layer, before a new command is
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. This
 * gives the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD IU buffer information are remembered, and
 * the routine returns a -EOVERFLOW status. Subsequently, when a queue
 * job structure is freed, it is immediately reallocated for anything on
 * the pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *			upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
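		 * (__nvmet_fc_fod_op_abort() and __nvmet_fc_fcp_nvme_cmd_done()
		 * check fod->abort and eventually route the i/o into
		 * nvmet_fc_abort_op(), which frees the data pages and
		 * returns the fod to the queue.)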
2757 */ 2758 spin_lock(&fod->flock); 2759 fod->abort = true; 2760 fod->aborted = true; 2761 spin_unlock(&fod->flock); 2762 } 2763 spin_unlock_irqrestore(&queue->qlock, flags); 2764 } 2765 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2766 2767 2768 struct nvmet_fc_traddr { 2769 u64 nn; 2770 u64 pn; 2771 }; 2772 2773 static int 2774 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2775 { 2776 u64 token64; 2777 2778 if (match_u64(sstr, &token64)) 2779 return -EINVAL; 2780 *val = token64; 2781 2782 return 0; 2783 } 2784 2785 /* 2786 * This routine validates and extracts the WWN's from the TRADDR string. 2787 * As kernel parsers need the 0x to determine number base, universally 2788 * build string to parse with 0x prefix before parsing name strings. 2789 */ 2790 static int 2791 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2792 { 2793 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2794 substring_t wwn = { name, &name[sizeof(name)-1] }; 2795 int nnoffset, pnoffset; 2796 2797 /* validate if string is one of the 2 allowed formats */ 2798 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2799 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2800 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2801 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2802 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2803 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2804 NVME_FC_TRADDR_OXNNLEN; 2805 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2806 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2807 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2808 "pn-", NVME_FC_TRADDR_NNLEN))) { 2809 nnoffset = NVME_FC_TRADDR_NNLEN; 2810 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2811 } else 2812 goto out_einval; 2813 2814 name[0] = '0'; 2815 name[1] = 'x'; 2816 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2817 2818 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2819 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2820 goto out_einval; 2821 2822 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2823 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2824 goto out_einval; 2825 2826 return 0; 2827 2828 out_einval: 2829 pr_warn("%s: bad traddr string\n", __func__); 2830 return -EINVAL; 2831 } 2832 2833 static int 2834 nvmet_fc_add_port(struct nvmet_port *port) 2835 { 2836 struct nvmet_fc_tgtport *tgtport; 2837 struct nvmet_fc_port_entry *pe; 2838 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2839 unsigned long flags; 2840 int ret; 2841 2842 /* validate the address info */ 2843 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2844 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2845 return -EINVAL; 2846 2847 /* map the traddr address info to a target port */ 2848 2849 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2850 sizeof(port->disc_addr.traddr)); 2851 if (ret) 2852 return ret; 2853 2854 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2855 if (!pe) 2856 return -ENOMEM; 2857 2858 ret = -ENXIO; 2859 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2860 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2861 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2862 (tgtport->fc_target_port.port_name == traddr.pn)) { 2863 /* a FC port can only be 1 nvmet port id */ 2864 if (!tgtport->pe) { 2865 nvmet_fc_portentry_bind(tgtport, pe, port); 2866 ret = 0; 2867 } else 2868 ret = -EALREADY; 2869 break; 2870 } 2871 } 2872 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2873 2874 if (ret) 2875 kfree(pe); 2876 2877 return ret; 2878 } 2879 2880 static void 2881 
nvmet_fc_remove_port(struct nvmet_port *port) 2882 { 2883 struct nvmet_fc_port_entry *pe = port->priv; 2884 2885 nvmet_fc_portentry_unbind(pe); 2886 2887 /* terminate any outstanding associations */ 2888 __nvmet_fc_free_assocs(pe->tgtport); 2889 2890 kfree(pe); 2891 } 2892 2893 static void 2894 nvmet_fc_discovery_chg(struct nvmet_port *port) 2895 { 2896 struct nvmet_fc_port_entry *pe = port->priv; 2897 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2898 2899 if (tgtport && tgtport->ops->discovery_event) 2900 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2901 } 2902 2903 static ssize_t 2904 nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, 2905 char *traddr, size_t traddr_size) 2906 { 2907 struct nvmet_sq *sq = ctrl->sqs[0]; 2908 struct nvmet_fc_tgt_queue *queue = 2909 container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); 2910 struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; 2911 struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; 2912 u64 wwnn, wwpn; 2913 ssize_t ret = 0; 2914 2915 if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) 2916 return -ENODEV; 2917 if (!hostport || !nvmet_fc_hostport_get(hostport)) { 2918 ret = -ENODEV; 2919 goto out_put; 2920 } 2921 2922 if (tgtport->ops->host_traddr) { 2923 ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); 2924 if (ret) 2925 goto out_put_host; 2926 ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); 2927 } 2928 out_put_host: 2929 nvmet_fc_hostport_put(hostport); 2930 out_put: 2931 nvmet_fc_tgtport_put(tgtport); 2932 return ret; 2933 } 2934 2935 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2936 .owner = THIS_MODULE, 2937 .type = NVMF_TRTYPE_FC, 2938 .msdbd = 1, 2939 .add_port = nvmet_fc_add_port, 2940 .remove_port = nvmet_fc_remove_port, 2941 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2942 .delete_ctrl = nvmet_fc_delete_ctrl, 2943 .discovery_chg = nvmet_fc_discovery_chg, 2944 .host_traddr = nvmet_fc_host_traddr, 2945 }; 2946 2947 static int __init nvmet_fc_init_module(void) 2948 { 2949 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2950 } 2951 2952 static void __exit nvmet_fc_exit_module(void) 2953 { 2954 /* ensure any shutdown operation, e.g. delete ctrls have finished */ 2955 flush_workqueue(nvmet_wq); 2956 2957 /* sanity check - all lports should be removed */ 2958 if (!list_empty(&nvmet_fc_target_list)) 2959 pr_warn("%s: targetport list not empty\n", __func__); 2960 2961 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2962 2963 ida_destroy(&nvmet_fc_tgtport_cnt); 2964 } 2965 2966 module_init(nvmet_fc_init_module); 2967 module_exit(nvmet_fc_exit_module); 2968 2969 MODULE_DESCRIPTION("NVMe target FC transport driver"); 2970 MODULE_LICENSE("GPL v2"); 2971
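/*
 * Illustrative sketch (not part of the driver): how a hypothetical FC
 * LLDD might feed a received FCP CMD IU into nvmet-fc and honor the
 * -EOVERFLOW / defer_rcv() contract documented above.  The "my_lport"
 * and "my_exchange" types, the embedded tgt_fcp_req member, and
 * my_lldd_abort_exchange() are made-up placeholders; only the
 * nvmet_fc_rcv_fcp_req() call and its documented return-code rules
 * come from this file.
 *
 *	static void my_lldd_recv_fcp_cmd(struct my_lport *lp,
 *					 struct my_exchange *xchg,
 *					 void *cmdiu, u32 cmdiu_len)
 *	{
 *		struct nvmefc_tgt_fcp_req *fcpreq = &xchg->tgt_fcp_req;
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(lp->targetport, fcpreq,
 *					   cmdiu, cmdiu_len);
 *		switch (ret) {
 *		case 0:
 *			// accepted: the CMD IU was copied, so the buffer
 *			// may be reused immediately
 *			break;
 *		case -EOVERFLOW:
 *			// accepted but deferred: the CMD IU buffer must
 *			// stay untouched until our defer_rcv() callback
 *			// has been invoked for this fcpreq
 *			break;
 *		default:
 *			// not accepted: abort the exchange on the wire
 *			my_lldd_abort_exchange(lp, xchg);
 *			break;
 *		}
 *	}
 */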