// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;

	struct work_struct		put_work;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	/* array of fcp_iods */
	struct nvmet_fc_fcp_iod		fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};

/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}
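/*
 * Illustrative sketch (editor's addition, not part of the driver): how the
 * helpers above compose and decompose an ID, using made-up values.
 *
 *	u64 assoc_id = 0x123456789abc0000ULL;	// random upper 6 bytes, low 2 bytes zero
 *	u64 conn_id  = assoc_id | 3;		// connection ID for qid 3
 *
 *	// nvmet_fc_getassociationid(conn_id) == 0x123456789abc0000ULL
 *	// nvmet_fc_getqueueid(conn_id) == 3
 *	// for qid 0, the connection ID equals the association ID
 */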

/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(work, struct nvmet_fc_tgtport, put_work);

	nvmet_fc_tgtport_put(tgtport);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * For more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
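/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * wrappers above let the same code path serve both fcloop (NULL dev) and
 * a real LLDD. With a NULL dev:
 *
 *	dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// a == 0, and fc_dma_mapping_error(NULL, a) == 0, so callers
 *	// proceed as if the mapping succeeded; the unmap/sync wrappers
 *	// are no-ops as well.
 */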


/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		goto out_putwork;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

out_putwork:
	queue_work(nvmet_wq, &tgtport->put_work);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
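/*
 * Illustrative sketch (editor's addition): hwqid selection above, for a
 * hypothetical LLDD reporting max_hw_queues = 4:
 *
 *	qid 0 (admin)		-> hwqid 0
 *	qid 1, 5, 9, ...	-> hwqid 0
 *	qid 2, 6, 10, ...	-> hwqid 1
 *	qid 3, 7, 11, ...	-> hwqid 2
 *	qid 4, 8, 12, ...	-> hwqid 3
 */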

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);

}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_free_queue;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	assoc->queues[qid] = queue;

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			rcu_read_unlock();
			return queue;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{
	/* if LLDD not implemented, leave as NULL */
	if (!hostport || !hostport->hosthandle)
		return;

	nvmet_fc_hostport_put(hostport);
}

static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *host;

	lockdep_assert_held(&tgtport->lock);

	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host))
				return host;
		}
	}

	return NULL;
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match = NULL;
	unsigned long flags;

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	/*
	 * take reference for what will be the newly allocated hostport if
	 * we end up using a new allocation
	 */
	if (!nvmet_fc_tgtport_get(tgtport))
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (match) {
		/* no new allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
		return match;
	}

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost) {
		/* no new allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	if (match) {
		/* new allocation not needed */
		kfree(newhost);
		newhost = match;
	} else {
		newhost->tgtport = tgtport;
		newhost->hosthandle = hosthandle;
		INIT_LIST_HEAD(&newhost->host_list);
		kref_init(&newhost->ref);

		list_add_tail(&newhost->host_list, &tgtport->host_list);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return newhost;
}

static void
nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

	nvmet_fc_delete_assoc(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	nvmet_fc_tgtport_get(assoc->tgtport);
	queue_work(nvmet_wq, &assoc->del_work);
}

static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{
	struct nvmet_fc_tgt_assoc *a;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
		if (association_id == a->association_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc;
	unsigned long flags;
	bool done;
	u64 ran;
	int idx;

	if (!tgtport->pe)
		return NULL;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_ida;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
	atomic_set(&assoc->terminating, 0);

	done = false;
	do {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		if (!nvmet_fc_assoc_exists(tgtport, ran)) {
			assoc->association_id = ran;
			list_add_tail_rcu(&assoc->a_list,
					&tgtport->assoc_list);
			done = true;
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	} while (!done);

	return assoc;

out_ida:
	ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;
	int i;

	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			nvmet_fc_delete_target_queue(assoc->queues[i]);
	}

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	nvmet_fc_free_hostport(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_free(&tgtport->assoc_cnt, assoc->a_id);
	dev_info(tgtport->dev,
		"{%d:%d} Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;
	int i, terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	synchronize_rcu();

	/* ensure all in-flight I/Os have been processed */
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			flush_workqueue(assoc->queues[i]->work_q);
	}

	dev_info(tgtport->dev,
		"{%d:%d} Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport).  If so, set the nvmet port
 * port entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;
	INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
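/*
 * Illustrative sketch (editor's addition): how an LLDD might call the
 * registration entry point above. The template contents and all
 * example_* names are hypothetical; only the fields validated by
 * nvmet_fc_register_targetport() are shown.
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.fcp_abort		= example_fcp_abort,
 *		.fcp_req_release	= example_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn, .port_name = wwpn, .port_id = did,
 *	};
 *	struct nvmet_fc_target_port *tgtport;
 *	int ret = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *						&pdev->dev, &tgtport);
 */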


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_free(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	rcu_read_unlock();
}

/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL).  The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *              LS was received on and which supplied the transport the
 *              hosthandle.
 * @hosthandle: the handle (pointer) that represents the host port
 *              that no longer has connectivity and that LS's should
 *              no longer be directed to.
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		assoc->hostport->invalid = 1;
		noassoc = false;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
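/*
 * Illustrative sketch (editor's addition): an LLDD that loses its login
 * with a remote host port would typically do something like the following
 * (rport/hosthandle naming is hypothetical):
 *
 *	nvmet_fc_invalidate_host(targetport, rport->hosthandle);
 *	// then terminate/abort any LS requests still outstanding for that
 *	// hosthandle; ops->host_release() is called once the transport has
 *	// dropped all of its references to the handle.
 */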

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		rcu_read_lock();
		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		rcu_read_unlock();

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_schedule_delete_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	flush_workqueue(nvmet_wq);

	/*
	 * should terminate LS's as well. However, LS's will be generated
	 * at the tail end of association termination, so they likely don't
	 * exist yet. And even if they did, it's worthwhile to just let
	 * them finish and targetport ref counting will clean things up.
	 */

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* ********************** FC-NVME LS RCV Handling ************************* */


static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for the Create Association Cmd
	 * descriptor because the pad length stated in the spec was
	 * incorrect.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue) {
				ret = VERR_QUEUE_ALLOC_FAIL;
				nvmet_fc_tgt_a_put(iod->assoc);
			}
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	dev_info(tgtport->dev,
		"{%d:%d} Association created\n",
		tgtport->fc_target_port.port_num, iod->assoc->a_id);

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

/*
 * Returns true if the LS response is to be transmitted
 * Returns false if the LS response is to be delayed
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&iod->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&iod->rspbuf->rsp_dis_assoc;
	struct nvmet_fc_tgt_assoc *assoc = NULL;
	struct nvmet_fc_ls_iod *oldls = NULL;
	unsigned long flags;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association - takes an assoc ref if !NULL */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret || !assoc) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * The rules for LS responses say the response cannot
	 * go back until ABTS's have been sent for all outstanding
	 * I/O and a Disconnect Association LS has been sent.
	 * So... save off the Disconnect LS to send the response
	 * later. If there was a prior LS already saved, replace
	 * it with the newer one and send a can't perform reject
	 * on the older one.
	 */
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	assoc->rcv_disconn = iod;
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (oldls) {
		dev_info(tgtport->dev,
			"{%d:%d} Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*iod->rspbuf),
						/* ok to use rqst, LS is same */
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	}

	nvmet_fc_schedule_delete_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);

	return false;
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
	bool sendrsp = true;

	iod->lsrsp->nvme_fc_private = iod;
	iod->lsrsp->rspbuf = iod->rspbuf;
	iod->lsrsp->rspdma = iod->rspdma;
	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsrsp->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT_ASSOC:
		/* Terminate a Queue/Connection or the Association */
		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	if (sendrsp)
		nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @hosthandle:  pointer to the host specific data, gets stored in iod.
 * @lsrsp:       pointer to a lsrsp structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			void *hosthandle,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: payload too large (%d)\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "",
			lsreqbuf_len);
		return -E2BIG;
	}

	if (!nvmet_fc_tgtport_get(tgtport)) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: target deleting\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		return -ESHUTDOWN;
	}

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		dev_info(tgtport->dev,
			"RCV %s LS failed: context allocation failed\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsrsp = lsrsp;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;
	iod->hosthandle = hosthandle;

	queue_work(nvmet_wq, &iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
2056 nvmefc_ls_names[w0->ls_cmd] : ""); 2057 nvmet_fc_tgtport_put(tgtport); 2058 return -ENOENT; 2059 } 2060 2061 iod->lsrsp = lsrsp; 2062 iod->fcpreq = NULL; 2063 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2064 iod->rqstdatalen = lsreqbuf_len; 2065 iod->hosthandle = hosthandle; 2066 2067 queue_work(nvmet_wq, &iod->work); 2068 2069 return 0; 2070 } 2071 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2072 2073 2074 /* 2075 * ********************** 2076 * Start of FCP handling 2077 * ********************** 2078 */ 2079 2080 static int 2081 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2082 { 2083 struct scatterlist *sg; 2084 unsigned int nent; 2085 2086 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2087 if (!sg) 2088 goto out; 2089 2090 fod->data_sg = sg; 2091 fod->data_sg_cnt = nent; 2092 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2093 ((fod->io_dir == NVMET_FCP_WRITE) ? 2094 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2095 /* note: write from initiator perspective */ 2096 fod->next_sg = fod->data_sg; 2097 2098 return 0; 2099 2100 out: 2101 return NVME_SC_INTERNAL; 2102 } 2103 2104 static void 2105 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2106 { 2107 if (!fod->data_sg || !fod->data_sg_cnt) 2108 return; 2109 2110 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2111 ((fod->io_dir == NVMET_FCP_WRITE) ? 2112 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2113 sgl_free(fod->data_sg); 2114 fod->data_sg = NULL; 2115 fod->data_sg_cnt = 0; 2116 } 2117 2118 2119 static bool 2120 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2121 { 2122 u32 sqtail, used; 2123 2124 /* egad, this is ugly. And sqtail is just a best guess */ 2125 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2126 2127 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2128 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2129 } 2130 2131 /* 2132 * Prep RSP payload. 2133 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2134 */ 2135 static void 2136 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2137 struct nvmet_fc_fcp_iod *fod) 2138 { 2139 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2140 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2141 struct nvme_completion *cqe = &ersp->cqe; 2142 u32 *cqewd = (u32 *)cqe; 2143 bool send_ersp = false; 2144 u32 rsn, rspcnt, xfr_length; 2145 2146 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2147 xfr_length = fod->req.transfer_len; 2148 else 2149 xfr_length = fod->offset; 2150 2151 /* 2152 * check to see if we can send a 0's rsp. 2153 * Note: to send a 0's response, the NVME-FC host transport will 2154 * recreate the CQE. The host transport knows: sq id, SQHD (last 2155 * seen in an ersp), and command_id. Thus it will create a 2156 * zero-filled CQE with those known fields filled in. Transport 2157 * must send an ersp for any condition where the cqe won't match 2158 * this. 2159 * 2160 * Here are the FC-NVME mandated cases where we must send an ersp: 2161 * every N responses, where N=ersp_ratio 2162 * force fabric commands to send ersp's (not in FC-NVME but good 2163 * practice) 2164 * normal cmds: any time status is non-zero, or status is zero 2165 * but words 0 or 1 are non-zero. 
2166 * the SQ is 90% or more full 2167 * the cmd is a fused command 2168 * transferred data length not equal to cmd iu length 2169 */ 2170 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2171 if (!(rspcnt % fod->queue->ersp_ratio) || 2172 nvme_is_fabrics((struct nvme_command *) sqe) || 2173 xfr_length != fod->req.transfer_len || 2174 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2175 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2176 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2177 send_ersp = true; 2178 2179 /* re-set the fields */ 2180 fod->fcpreq->rspaddr = ersp; 2181 fod->fcpreq->rspdma = fod->rspdma; 2182 2183 if (!send_ersp) { 2184 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2185 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2186 } else { 2187 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2188 rsn = atomic_inc_return(&fod->queue->rsn); 2189 ersp->rsn = cpu_to_be32(rsn); 2190 ersp->xfrd_len = cpu_to_be32(xfr_length); 2191 fod->fcpreq->rsplen = sizeof(*ersp); 2192 } 2193 2194 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2195 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2196 } 2197 2198 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2199 2200 static void 2201 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2202 struct nvmet_fc_fcp_iod *fod) 2203 { 2204 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2205 2206 /* data no longer needed */ 2207 nvmet_fc_free_tgt_pgs(fod); 2208 2209 /* 2210 * if an ABTS was received or we issued the fcp_abort early 2211 * don't call abort routine again. 2212 */ 2213 /* no need to take lock - lock was taken earlier to get here */ 2214 if (!fod->aborted) 2215 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2216 2217 nvmet_fc_free_fcp_iod(fod->queue, fod); 2218 } 2219 2220 static void 2221 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2222 struct nvmet_fc_fcp_iod *fod) 2223 { 2224 int ret; 2225 2226 fod->fcpreq->op = NVMET_FCOP_RSP; 2227 fod->fcpreq->timeout = 0; 2228 2229 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2230 2231 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2232 if (ret) 2233 nvmet_fc_abort_op(tgtport, fod); 2234 } 2235 2236 static void 2237 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2238 struct nvmet_fc_fcp_iod *fod, u8 op) 2239 { 2240 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2241 struct scatterlist *sg = fod->next_sg; 2242 unsigned long flags; 2243 u32 remaininglen = fod->req.transfer_len - fod->offset; 2244 u32 tlen = 0; 2245 int ret; 2246 2247 fcpreq->op = op; 2248 fcpreq->offset = fod->offset; 2249 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2250 2251 /* 2252 * for next sequence: 2253 * break at a sg element boundary 2254 * attempt to keep sequence length capped at 2255 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2256 * be longer if a single sg element is larger 2257 * than that amount. This is done to avoid creating 2258 * a new sg list to use for the tgtport api. 
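	 *
	 * Illustrative example (not normative): with 4K dma-mapped sg
	 * elements and a max_sg_cnt of 256, each fcp_op() call carries
	 * just under NVMET_FC_MAX_SEQ_LENGTH; the remainder of the
	 * transfer is sent by further calls issued from the op-done path
	 * until fod->offset reaches fod->req.transfer_len.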
2259 */ 2260 fcpreq->sg = sg; 2261 fcpreq->sg_cnt = 0; 2262 while (tlen < remaininglen && 2263 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2264 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2265 fcpreq->sg_cnt++; 2266 tlen += sg_dma_len(sg); 2267 sg = sg_next(sg); 2268 } 2269 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2270 fcpreq->sg_cnt++; 2271 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2272 sg = sg_next(sg); 2273 } 2274 if (tlen < remaininglen) 2275 fod->next_sg = sg; 2276 else 2277 fod->next_sg = NULL; 2278 2279 fcpreq->transfer_length = tlen; 2280 fcpreq->transferred_length = 0; 2281 fcpreq->fcp_error = 0; 2282 fcpreq->rsplen = 0; 2283 2284 /* 2285 * If the last READDATA request: check if LLDD supports 2286 * combined xfr with response. 2287 */ 2288 if ((op == NVMET_FCOP_READDATA) && 2289 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2290 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2291 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2292 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2293 } 2294 2295 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2296 if (ret) { 2297 /* 2298 * should be ok to set w/o lock as its in the thread of 2299 * execution (not an async timer routine) and doesn't 2300 * contend with any clearing action 2301 */ 2302 fod->abort = true; 2303 2304 if (op == NVMET_FCOP_WRITEDATA) { 2305 spin_lock_irqsave(&fod->flock, flags); 2306 fod->writedataactive = false; 2307 spin_unlock_irqrestore(&fod->flock, flags); 2308 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2309 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2310 fcpreq->fcp_error = ret; 2311 fcpreq->transferred_length = 0; 2312 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2313 } 2314 } 2315 } 2316 2317 static inline bool 2318 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2319 { 2320 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2321 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2322 2323 /* if in the middle of an io and we need to tear down */ 2324 if (abort) { 2325 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2326 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2327 return true; 2328 } 2329 2330 nvmet_fc_abort_op(tgtport, fod); 2331 return true; 2332 } 2333 2334 return false; 2335 } 2336 2337 /* 2338 * actual done handler for FCP operations when completed by the lldd 2339 */ 2340 static void 2341 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2342 { 2343 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2344 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2345 unsigned long flags; 2346 bool abort; 2347 2348 spin_lock_irqsave(&fod->flock, flags); 2349 abort = fod->abort; 2350 fod->writedataactive = false; 2351 spin_unlock_irqrestore(&fod->flock, flags); 2352 2353 switch (fcpreq->op) { 2354 2355 case NVMET_FCOP_WRITEDATA: 2356 if (__nvmet_fc_fod_op_abort(fod, abort)) 2357 return; 2358 if (fcpreq->fcp_error || 2359 fcpreq->transferred_length != fcpreq->transfer_length) { 2360 spin_lock_irqsave(&fod->flock, flags); 2361 fod->abort = true; 2362 spin_unlock_irqrestore(&fod->flock, flags); 2363 2364 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2365 return; 2366 } 2367 2368 fod->offset += fcpreq->transferred_length; 2369 if (fod->offset != fod->req.transfer_len) { 2370 spin_lock_irqsave(&fod->flock, flags); 2371 fod->writedataactive = true; 2372 spin_unlock_irqrestore(&fod->flock, flags); 2373 2374 /* transfer the next chunk */ 2375 nvmet_fc_transfer_fcp_data(tgtport, fod, 2376 NVMET_FCOP_WRITEDATA); 2377 return; 2378 } 
2379 2380 /* data transfer complete, resume with nvmet layer */ 2381 fod->req.execute(&fod->req); 2382 break; 2383 2384 case NVMET_FCOP_READDATA: 2385 case NVMET_FCOP_READDATA_RSP: 2386 if (__nvmet_fc_fod_op_abort(fod, abort)) 2387 return; 2388 if (fcpreq->fcp_error || 2389 fcpreq->transferred_length != fcpreq->transfer_length) { 2390 nvmet_fc_abort_op(tgtport, fod); 2391 return; 2392 } 2393 2394 /* success */ 2395 2396 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2397 /* data no longer needed */ 2398 nvmet_fc_free_tgt_pgs(fod); 2399 nvmet_fc_free_fcp_iod(fod->queue, fod); 2400 return; 2401 } 2402 2403 fod->offset += fcpreq->transferred_length; 2404 if (fod->offset != fod->req.transfer_len) { 2405 /* transfer the next chunk */ 2406 nvmet_fc_transfer_fcp_data(tgtport, fod, 2407 NVMET_FCOP_READDATA); 2408 return; 2409 } 2410 2411 /* data transfer complete, send response */ 2412 2413 /* data no longer needed */ 2414 nvmet_fc_free_tgt_pgs(fod); 2415 2416 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2417 2418 break; 2419 2420 case NVMET_FCOP_RSP: 2421 if (__nvmet_fc_fod_op_abort(fod, abort)) 2422 return; 2423 nvmet_fc_free_fcp_iod(fod->queue, fod); 2424 break; 2425 2426 default: 2427 break; 2428 } 2429 } 2430 2431 static void 2432 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2433 { 2434 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2435 2436 nvmet_fc_fod_op_done(fod); 2437 } 2438 2439 /* 2440 * actual completion handler after execution by the nvmet layer 2441 */ 2442 static void 2443 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2444 struct nvmet_fc_fcp_iod *fod, int status) 2445 { 2446 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2447 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2448 unsigned long flags; 2449 bool abort; 2450 2451 spin_lock_irqsave(&fod->flock, flags); 2452 abort = fod->abort; 2453 spin_unlock_irqrestore(&fod->flock, flags); 2454 2455 /* if we have a CQE, snoop the last sq_head value */ 2456 if (!status) 2457 fod->queue->sqhd = cqe->sq_head; 2458 2459 if (abort) { 2460 nvmet_fc_abort_op(tgtport, fod); 2461 return; 2462 } 2463 2464 /* if an error handling the cmd post initial parsing */ 2465 if (status) { 2466 /* fudge up a failed CQE status for our transport error */ 2467 memset(cqe, 0, sizeof(*cqe)); 2468 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2469 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2470 cqe->command_id = sqe->command_id; 2471 cqe->status = cpu_to_le16(status); 2472 } else { 2473 2474 /* 2475 * try to push the data even if the SQE status is non-zero. 
2476 * There may be a status where data still was intended to 2477 * be moved 2478 */ 2479 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2480 /* push the data over before sending rsp */ 2481 nvmet_fc_transfer_fcp_data(tgtport, fod, 2482 NVMET_FCOP_READDATA); 2483 return; 2484 } 2485 2486 /* writes & no data - fall thru */ 2487 } 2488 2489 /* data no longer needed */ 2490 nvmet_fc_free_tgt_pgs(fod); 2491 2492 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2493 } 2494 2495 2496 static void 2497 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2498 { 2499 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2500 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2501 2502 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2503 } 2504 2505 2506 /* 2507 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2508 */ 2509 static void 2510 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2511 struct nvmet_fc_fcp_iod *fod) 2512 { 2513 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2514 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2515 int ret; 2516 2517 /* 2518 * Fused commands are currently not supported in the linux 2519 * implementation. 2520 * 2521 * As such, the implementation of the FC transport does not 2522 * look at the fused commands and order delivery to the upper 2523 * layer until we have both based on csn. 2524 */ 2525 2526 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2527 2528 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2529 fod->io_dir = NVMET_FCP_WRITE; 2530 if (!nvme_is_write(&cmdiu->sqe)) 2531 goto transport_error; 2532 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2533 fod->io_dir = NVMET_FCP_READ; 2534 if (nvme_is_write(&cmdiu->sqe)) 2535 goto transport_error; 2536 } else { 2537 fod->io_dir = NVMET_FCP_NODATA; 2538 if (xfrlen) 2539 goto transport_error; 2540 } 2541 2542 fod->req.cmd = &fod->cmdiubuf.sqe; 2543 fod->req.cqe = &fod->rspiubuf.cqe; 2544 if (!tgtport->pe) 2545 goto transport_error; 2546 fod->req.port = tgtport->pe->port; 2547 2548 /* clear any response payload */ 2549 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2550 2551 fod->data_sg = NULL; 2552 fod->data_sg_cnt = 0; 2553 2554 ret = nvmet_req_init(&fod->req, 2555 &fod->queue->nvme_cq, 2556 &fod->queue->nvme_sq, 2557 &nvmet_fc_tgt_fcp_ops); 2558 if (!ret) { 2559 /* bad SQE content or invalid ctrl state */ 2560 /* nvmet layer has already called op done to send rsp. */ 2561 return; 2562 } 2563 2564 fod->req.transfer_len = xfrlen; 2565 2566 /* keep a running counter of tail position */ 2567 atomic_inc(&fod->queue->sqtail); 2568 2569 if (fod->req.transfer_len) { 2570 ret = nvmet_fc_alloc_tgt_pgs(fod); 2571 if (ret) { 2572 nvmet_req_complete(&fod->req, ret); 2573 return; 2574 } 2575 } 2576 fod->req.sg = fod->data_sg; 2577 fod->req.sg_cnt = fod->data_sg_cnt; 2578 fod->offset = 0; 2579 2580 if (fod->io_dir == NVMET_FCP_WRITE) { 2581 /* pull the data over before invoking nvmet layer */ 2582 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2583 return; 2584 } 2585 2586 /* 2587 * Reads or no data: 2588 * 2589 * can invoke the nvmet_layer now. If read data, cmd completion will 2590 * push the data 2591 */ 2592 fod->req.execute(&fod->req); 2593 return; 2594 2595 transport_error: 2596 nvmet_fc_abort_op(tgtport, fod); 2597 } 2598 2599 /** 2600 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2601 * upon the reception of a NVME FCP CMD IU. 2602 * 2603 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2604 * layer for processing. 
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, FC is packetized, and the FC LLDD api may issue a hw
 * command to send a response yet not see the hw completion for that
 * command (and thus not upcall the nvmet_fc layer) before a new command
 * is asynchronously received. It is therefore possible for a command to
 * be received before the LLDD and nvmet_fc have recycled the job
 * structure, giving the appearance of more commands received than fit
 * in the sq. To alleviate this scenario, a temporary queue is maintained
 * in the transport for pending LLDD requests waiting for a queue job
 * structure. In these "overrun" cases, a temporary queue element is
 * allocated, the LLDD request and CMD IU buffer information are
 * remembered, and the routine returns -EOVERFLOW. Subsequently, when a
 * queue job structure is freed, it is immediately reallocated for
 * anything on the pending request list. The LLDD's defer_rcv() callback
 * is called, informing the LLDD that it may reuse the CMD IU buffer, and
 * the io is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer. (An illustrative
 * LLDD-side sketch of this contract appears at the end of this file.)
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *		was received on.
 * @fcpreq:	pointer to a fcpreq request structure to be used to reference
 *		the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:	pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
			be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
2675 */ 2676 2677 spin_lock_irqsave(&queue->qlock, flags); 2678 2679 fod = nvmet_fc_alloc_fcp_iod(queue); 2680 if (fod) { 2681 spin_unlock_irqrestore(&queue->qlock, flags); 2682 2683 fcpreq->nvmet_fc_private = fod; 2684 fod->fcpreq = fcpreq; 2685 2686 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2687 2688 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2689 2690 return 0; 2691 } 2692 2693 if (!tgtport->ops->defer_rcv) { 2694 spin_unlock_irqrestore(&queue->qlock, flags); 2695 /* release the queue lookup reference */ 2696 nvmet_fc_tgt_q_put(queue); 2697 return -ENOENT; 2698 } 2699 2700 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2701 struct nvmet_fc_defer_fcp_req, req_list); 2702 if (deferfcp) { 2703 /* Just re-use one that was previously allocated */ 2704 list_del(&deferfcp->req_list); 2705 } else { 2706 spin_unlock_irqrestore(&queue->qlock, flags); 2707 2708 /* Now we need to dynamically allocate one */ 2709 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2710 if (!deferfcp) { 2711 /* release the queue lookup reference */ 2712 nvmet_fc_tgt_q_put(queue); 2713 return -ENOMEM; 2714 } 2715 spin_lock_irqsave(&queue->qlock, flags); 2716 } 2717 2718 /* For now, use rspaddr / rsplen to save payload information */ 2719 fcpreq->rspaddr = cmdiubuf; 2720 fcpreq->rsplen = cmdiubuf_len; 2721 deferfcp->fcp_req = fcpreq; 2722 2723 /* defer processing till a fod becomes available */ 2724 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2725 2726 /* NOTE: the queue lookup reference is still valid */ 2727 2728 spin_unlock_irqrestore(&queue->qlock, flags); 2729 2730 return -EOVERFLOW; 2731 } 2732 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2733 2734 /** 2735 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2736 * upon the reception of an ABTS for a FCP command 2737 * 2738 * Notify the transport that an ABTS has been received for a FCP command 2739 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2740 * LLDD believes the command is still being worked on 2741 * (template_ops->fcp_req_release() has not been called). 2742 * 2743 * The transport will wait for any outstanding work (an op to the LLDD, 2744 * which the lldd should complete with error due to the ABTS; or the 2745 * completion from the nvmet layer of the nvme command), then will 2746 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to 2747 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2748 * to the ABTS either after return from this function (assuming any 2749 * outstanding op work has been terminated) or upon the callback being 2750 * called. 2751 * 2752 * @target_port: pointer to the (registered) target port the FCP CMD IU 2753 * was received on. 2754 * @fcpreq: pointer to the fcpreq request structure that corresponds 2755 * to the exchange that received the ABTS. 2756 */ 2757 void 2758 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2759 struct nvmefc_tgt_fcp_req *fcpreq) 2760 { 2761 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2762 struct nvmet_fc_tgt_queue *queue; 2763 unsigned long flags; 2764 2765 if (!fod || fod->fcpreq != fcpreq) 2766 /* job appears to have already completed, ignore abort */ 2767 return; 2768 2769 queue = fod->queue; 2770 2771 spin_lock_irqsave(&queue->qlock, flags); 2772 if (fod->active) { 2773 /* 2774 * mark as abort. The abort handler, invoked upon completion 2775 * of any work, will detect the aborted status and do the 2776 * callback. 
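		 * (Because fod->aborted is set here, nvmet_fc_abort_op()
		 * will skip the LLDD fcp_abort() callback and simply tear
		 * the io down when that completion arrives.)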
2777 */ 2778 spin_lock(&fod->flock); 2779 fod->abort = true; 2780 fod->aborted = true; 2781 spin_unlock(&fod->flock); 2782 } 2783 spin_unlock_irqrestore(&queue->qlock, flags); 2784 } 2785 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2786 2787 2788 struct nvmet_fc_traddr { 2789 u64 nn; 2790 u64 pn; 2791 }; 2792 2793 static int 2794 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2795 { 2796 u64 token64; 2797 2798 if (match_u64(sstr, &token64)) 2799 return -EINVAL; 2800 *val = token64; 2801 2802 return 0; 2803 } 2804 2805 /* 2806 * This routine validates and extracts the WWN's from the TRADDR string. 2807 * As kernel parsers need the 0x to determine number base, universally 2808 * build string to parse with 0x prefix before parsing name strings. 2809 */ 2810 static int 2811 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2812 { 2813 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2814 substring_t wwn = { name, &name[sizeof(name)-1] }; 2815 int nnoffset, pnoffset; 2816 2817 /* validate if string is one of the 2 allowed formats */ 2818 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2819 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2820 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2821 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2822 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2823 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2824 NVME_FC_TRADDR_OXNNLEN; 2825 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2826 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2827 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2828 "pn-", NVME_FC_TRADDR_NNLEN))) { 2829 nnoffset = NVME_FC_TRADDR_NNLEN; 2830 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2831 } else 2832 goto out_einval; 2833 2834 name[0] = '0'; 2835 name[1] = 'x'; 2836 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2837 2838 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2839 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2840 goto out_einval; 2841 2842 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2843 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2844 goto out_einval; 2845 2846 return 0; 2847 2848 out_einval: 2849 pr_warn("%s: bad traddr string\n", __func__); 2850 return -EINVAL; 2851 } 2852 2853 static int 2854 nvmet_fc_add_port(struct nvmet_port *port) 2855 { 2856 struct nvmet_fc_tgtport *tgtport; 2857 struct nvmet_fc_port_entry *pe; 2858 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2859 unsigned long flags; 2860 int ret; 2861 2862 /* validate the address info */ 2863 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2864 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2865 return -EINVAL; 2866 2867 /* map the traddr address info to a target port */ 2868 2869 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2870 sizeof(port->disc_addr.traddr)); 2871 if (ret) 2872 return ret; 2873 2874 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2875 if (!pe) 2876 return -ENOMEM; 2877 2878 ret = -ENXIO; 2879 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2880 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2881 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2882 (tgtport->fc_target_port.port_name == traddr.pn)) { 2883 /* a FC port can only be 1 nvmet port id */ 2884 if (!tgtport->pe) { 2885 nvmet_fc_portentry_bind(tgtport, pe, port); 2886 ret = 0; 2887 } else 2888 ret = -EALREADY; 2889 break; 2890 } 2891 } 2892 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2893 2894 if (ret) 2895 kfree(pe); 2896 2897 return ret; 2898 } 2899 2900 static void 2901 
nvmet_fc_remove_port(struct nvmet_port *port) 2902 { 2903 struct nvmet_fc_port_entry *pe = port->priv; 2904 2905 nvmet_fc_portentry_unbind(pe); 2906 2907 /* terminate any outstanding associations */ 2908 __nvmet_fc_free_assocs(pe->tgtport); 2909 2910 kfree(pe); 2911 } 2912 2913 static void 2914 nvmet_fc_discovery_chg(struct nvmet_port *port) 2915 { 2916 struct nvmet_fc_port_entry *pe = port->priv; 2917 struct nvmet_fc_tgtport *tgtport = pe->tgtport; 2918 2919 if (tgtport && tgtport->ops->discovery_event) 2920 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2921 } 2922 2923 static ssize_t 2924 nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, 2925 char *traddr, size_t traddr_size) 2926 { 2927 struct nvmet_sq *sq = ctrl->sqs[0]; 2928 struct nvmet_fc_tgt_queue *queue = 2929 container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); 2930 struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; 2931 struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; 2932 u64 wwnn, wwpn; 2933 ssize_t ret = 0; 2934 2935 if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) 2936 return -ENODEV; 2937 if (!hostport || !nvmet_fc_hostport_get(hostport)) { 2938 ret = -ENODEV; 2939 goto out_put; 2940 } 2941 2942 if (tgtport->ops->host_traddr) { 2943 ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); 2944 if (ret) 2945 goto out_put_host; 2946 ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); 2947 } 2948 out_put_host: 2949 nvmet_fc_hostport_put(hostport); 2950 out_put: 2951 nvmet_fc_tgtport_put(tgtport); 2952 return ret; 2953 } 2954 2955 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2956 .owner = THIS_MODULE, 2957 .type = NVMF_TRTYPE_FC, 2958 .msdbd = 1, 2959 .add_port = nvmet_fc_add_port, 2960 .remove_port = nvmet_fc_remove_port, 2961 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 2962 .delete_ctrl = nvmet_fc_delete_ctrl, 2963 .discovery_chg = nvmet_fc_discovery_chg, 2964 .host_traddr = nvmet_fc_host_traddr, 2965 }; 2966 2967 static int __init nvmet_fc_init_module(void) 2968 { 2969 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 2970 } 2971 2972 static void __exit nvmet_fc_exit_module(void) 2973 { 2974 /* ensure any shutdown operation, e.g. delete ctrls have finished */ 2975 flush_workqueue(nvmet_wq); 2976 2977 /* sanity check - all lports should be removed */ 2978 if (!list_empty(&nvmet_fc_target_list)) 2979 pr_warn("%s: targetport list not empty\n", __func__); 2980 2981 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 2982 2983 ida_destroy(&nvmet_fc_tgtport_cnt); 2984 } 2985 2986 module_init(nvmet_fc_init_module); 2987 module_exit(nvmet_fc_exit_module); 2988 2989 MODULE_DESCRIPTION("NVMe target FC transport driver"); 2990 MODULE_LICENSE("GPL v2"); 2991
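
/*
 * Illustrative sketch (documentation only, not compiled code): one way an
 * LLDD might feed a received FCP CMD IU into the transport and honor the
 * -EOVERFLOW contract described above nvmet_fc_rcv_fcp_req(). The
 * lldd_exchange structure and the lldd_hold_cmd_buf() and
 * lldd_abort_exchange() helpers are hypothetical LLDD-side constructs,
 * not part of this driver or of the nvme-fc API.
 *
 *	static void lldd_handle_new_fcp_cmd(struct lldd_exchange *xchg,
 *					    void *cmdiubuf, u32 cmdiubuf_len)
 *	{
 *		int ret;
 *
 *		ret = nvmet_fc_rcv_fcp_req(xchg->targetport,
 *					   &xchg->tgt_fcp_req,
 *					   cmdiubuf, cmdiubuf_len);
 *		if (!ret) {
 *			// accepted: the transport copied the CMD IU, so the
 *			// buffer may be reused immediately
 *			return;
 *		}
 *		if (ret == -EOVERFLOW) {
 *			// deferred: keep the CMD IU buffer intact until the
 *			// ->defer_rcv() template callback runs for this
 *			// nvmefc_tgt_fcp_req
 *			lldd_hold_cmd_buf(xchg);
 *			return;
 *		}
 *		// any other error: the transport did not accept the command,
 *		// so terminate (ABTS) the exchange
 *		lldd_abort_exchange(xchg);
 *	}
 */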