1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/slab.h> 8 #include <linux/blk-mq.h> 9 #include <linux/parser.h> 10 #include <linux/random.h> 11 #include <uapi/scsi/fc/fc_fs.h> 12 #include <uapi/scsi/fc/fc_els.h> 13 14 #include "nvmet.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "../host/fc.h" 18 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 #define NVMET_LS_CTX_COUNT 256 24 25 struct nvmet_fc_tgtport; 26 struct nvmet_fc_tgt_assoc; 27 28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ 29 struct nvmefc_ls_rsp *lsrsp; 30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ 31 32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ 33 34 struct nvmet_fc_tgtport *tgtport; 35 struct nvmet_fc_tgt_assoc *assoc; 36 void *hosthandle; 37 38 union nvmefc_ls_requests *rqstbuf; 39 union nvmefc_ls_responses *rspbuf; 40 u16 rqstdatalen; 41 dma_addr_t rspdma; 42 43 struct scatterlist sg[2]; 44 45 struct work_struct work; 46 } __aligned(sizeof(unsigned long long)); 47 48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ 49 struct nvmefc_ls_req ls_req; 50 51 struct nvmet_fc_tgtport *tgtport; 52 void *hosthandle; 53 54 int ls_error; 55 struct list_head lsreq_list; /* tgtport->ls_req_list */ 56 bool req_queued; 57 58 struct work_struct put_work; 59 }; 60 61 62 /* desired maximum for a single sequence - if sg list allows it */ 63 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) 64 65 enum nvmet_fcp_datadir { 66 NVMET_FCP_NODATA, 67 NVMET_FCP_WRITE, 68 NVMET_FCP_READ, 69 NVMET_FCP_ABORTED, 70 }; 71 72 struct nvmet_fc_fcp_iod { 73 struct nvmefc_tgt_fcp_req *fcpreq; 74 75 struct nvme_fc_cmd_iu cmdiubuf; 76 struct nvme_fc_ersp_iu rspiubuf; 77 dma_addr_t rspdma; 78 struct scatterlist *next_sg; 79 struct scatterlist *data_sg; 80 int data_sg_cnt; 81 u32 offset; 82 enum nvmet_fcp_datadir io_dir; 83 bool active; 84 bool abort; 85 bool aborted; 86 bool writedataactive; 87 spinlock_t flock; 88 89 struct nvmet_req req; 90 struct work_struct defer_work; 91 92 struct nvmet_fc_tgtport *tgtport; 93 struct nvmet_fc_tgt_queue *queue; 94 95 struct list_head fcp_list; /* tgtport->fcp_list */ 96 }; 97 98 struct nvmet_fc_tgtport { 99 struct nvmet_fc_target_port fc_target_port; 100 101 struct list_head tgt_list; /* nvmet_fc_target_list */ 102 struct device *dev; /* dev for dma mapping */ 103 struct nvmet_fc_target_template *ops; 104 105 struct nvmet_fc_ls_iod *iod; 106 spinlock_t lock; 107 struct list_head ls_rcv_list; 108 struct list_head ls_req_list; 109 struct list_head ls_busylist; 110 struct list_head assoc_list; 111 struct list_head host_list; 112 struct ida assoc_cnt; 113 struct nvmet_fc_port_entry *pe; 114 struct kref ref; 115 u32 max_sg_cnt; 116 }; 117 118 struct nvmet_fc_port_entry { 119 struct nvmet_fc_tgtport *tgtport; 120 struct nvmet_port *port; 121 u64 node_name; 122 u64 port_name; 123 struct list_head pe_list; 124 }; 125 126 struct nvmet_fc_defer_fcp_req { 127 struct list_head req_list; 128 struct nvmefc_tgt_fcp_req *fcp_req; 129 }; 130 131 struct nvmet_fc_tgt_queue { 132 bool ninetypercent; 133 u16 qid; 134 u16 sqsize; 135 u16 ersp_ratio; 136 __le16 sqhd; 137 atomic_t connected; 138 atomic_t sqtail; 139 atomic_t zrspcnt; 140 atomic_t rsn; 141 spinlock_t qlock; 142 struct nvmet_cq nvme_cq; 143 struct nvmet_sq nvme_sq; 144 struct nvmet_fc_tgt_assoc *assoc; 145 struct list_head 
fod_list; 146 struct list_head pending_cmd_list; 147 struct list_head avail_defer_list; 148 struct workqueue_struct *work_q; 149 struct kref ref; 150 /* array of fcp_iods */ 151 struct nvmet_fc_fcp_iod fod[] /* __counted_by(sqsize) */; 152 } __aligned(sizeof(unsigned long long)); 153 154 struct nvmet_fc_hostport { 155 struct nvmet_fc_tgtport *tgtport; 156 void *hosthandle; 157 struct list_head host_list; 158 struct kref ref; 159 u8 invalid; 160 }; 161 162 struct nvmet_fc_tgt_assoc { 163 u64 association_id; 164 u32 a_id; 165 atomic_t terminating; 166 struct nvmet_fc_tgtport *tgtport; 167 struct nvmet_fc_hostport *hostport; 168 struct nvmet_fc_ls_iod *rcv_disconn; 169 struct list_head a_list; 170 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; 171 struct kref ref; 172 struct work_struct del_work; 173 }; 174 175 /* 176 * Association and Connection IDs: 177 * 178 * Association ID will have random number in upper 6 bytes and zero 179 * in lower 2 bytes 180 * 181 * Connection IDs will be Association ID with QID or'd in lower 2 bytes 182 * 183 * note: Association ID = Connection ID for queue 0 184 */ 185 #define BYTES_FOR_QID sizeof(u16) 186 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8) 187 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1)) 188 189 static inline u64 190 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid) 191 { 192 return (assoc->association_id | qid); 193 } 194 195 static inline u64 196 nvmet_fc_getassociationid(u64 connectionid) 197 { 198 return connectionid & ~NVMET_FC_QUEUEID_MASK; 199 } 200 201 static inline u16 202 nvmet_fc_getqueueid(u64 connectionid) 203 { 204 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK); 205 } 206 207 static inline struct nvmet_fc_tgtport * 208 targetport_to_tgtport(struct nvmet_fc_target_port *targetport) 209 { 210 return container_of(targetport, struct nvmet_fc_tgtport, 211 fc_target_port); 212 } 213 214 static inline struct nvmet_fc_fcp_iod * 215 nvmet_req_to_fod(struct nvmet_req *nvme_req) 216 { 217 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req); 218 } 219 220 221 /* *************************** Globals **************************** */ 222 223 224 static DEFINE_SPINLOCK(nvmet_fc_tgtlock); 225 226 static LIST_HEAD(nvmet_fc_target_list); 227 static DEFINE_IDA(nvmet_fc_tgtport_cnt); 228 static LIST_HEAD(nvmet_fc_portentry_list); 229 230 231 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); 232 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); 233 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); 234 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); 235 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); 236 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 237 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 238 static void nvmet_fc_put_lsop_work(struct work_struct *work) 239 { 240 struct nvmet_fc_ls_req_op *lsop = 241 container_of(work, struct nvmet_fc_ls_req_op, put_work); 242 243 nvmet_fc_tgtport_put(lsop->tgtport); 244 kfree(lsop); 245 } 246 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 247 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 248 struct nvmet_fc_fcp_iod *fod); 249 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); 250 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 251 struct nvmet_fc_ls_iod *iod); 252 253 254 /* *********************** FC-NVME DMA Handling 
**************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those returning just a dma address), noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ?
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 332 } 333 334 static inline void 335 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 336 enum dma_data_direction dir) 337 { 338 if (dev) 339 dma_unmap_sg(dev, sg, nents, dir); 340 } 341 342 343 /* ********************** FC-NVME LS XMT Handling ************************* */ 344 345 346 static void 347 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) 348 { 349 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; 350 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 351 unsigned long flags; 352 353 spin_lock_irqsave(&tgtport->lock, flags); 354 355 if (!lsop->req_queued) { 356 spin_unlock_irqrestore(&tgtport->lock, flags); 357 goto out_putwork; 358 } 359 360 list_del(&lsop->lsreq_list); 361 362 lsop->req_queued = false; 363 364 spin_unlock_irqrestore(&tgtport->lock, flags); 365 366 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 367 (lsreq->rqstlen + lsreq->rsplen), 368 DMA_BIDIRECTIONAL); 369 370 out_putwork: 371 queue_work(nvmet_wq, &lsop->put_work); 372 } 373 374 static int 375 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, 376 struct nvmet_fc_ls_req_op *lsop, 377 void (*done)(struct nvmefc_ls_req *req, int status)) 378 { 379 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 380 unsigned long flags; 381 int ret = 0; 382 383 if (!tgtport->ops->ls_req) 384 return -EOPNOTSUPP; 385 386 if (!nvmet_fc_tgtport_get(tgtport)) 387 return -ESHUTDOWN; 388 389 lsreq->done = done; 390 lsop->req_queued = false; 391 INIT_LIST_HEAD(&lsop->lsreq_list); 392 INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work); 393 394 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, 395 lsreq->rqstlen + lsreq->rsplen, 396 DMA_BIDIRECTIONAL); 397 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { 398 ret = -EFAULT; 399 goto out_puttgtport; 400 } 401 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 402 403 spin_lock_irqsave(&tgtport->lock, flags); 404 405 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); 406 407 lsop->req_queued = true; 408 409 spin_unlock_irqrestore(&tgtport->lock, flags); 410 411 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, 412 lsreq); 413 if (ret) 414 goto out_unlink; 415 416 return 0; 417 418 out_unlink: 419 lsop->ls_error = ret; 420 spin_lock_irqsave(&tgtport->lock, flags); 421 lsop->req_queued = false; 422 list_del(&lsop->lsreq_list); 423 spin_unlock_irqrestore(&tgtport->lock, flags); 424 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 425 (lsreq->rqstlen + lsreq->rsplen), 426 DMA_BIDIRECTIONAL); 427 out_puttgtport: 428 nvmet_fc_tgtport_put(tgtport); 429 430 return ret; 431 } 432 433 static int 434 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, 435 struct nvmet_fc_ls_req_op *lsop, 436 void (*done)(struct nvmefc_ls_req *req, int status)) 437 { 438 /* don't wait for completion */ 439 440 return __nvmet_fc_send_ls_req(tgtport, lsop, done); 441 } 442 443 static void 444 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 445 { 446 struct nvmet_fc_ls_req_op *lsop = 447 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); 448 449 __nvmet_fc_finish_ls_req(lsop); 450 451 /* fc-nvme target doesn't care about success or failure of cmd */ 452 } 453 454 /* 455 * This routine sends a FC-NVME LS to disconnect (aka terminate) 456 * the FC-NVME Association. Terminating the association also 457 * terminates the FC-NVME connections (per queue, both admin and io 458 * queues) that are part of the association. E.g. 
things are torn 459 * down, and the related FC-NVME Association ID and Connection IDs 460 * become invalid. 461 * 462 * The behavior of the fc-nvme target is such that its 463 * understanding of the association and connections will implicitly 464 * be torn down. The action is implicit as it may be due to a loss of 465 * connectivity with the fc-nvme host, so the target may never get a 466 * response even if it tried. As such, the action of this routine 467 * is to asynchronously send the LS, ignore any results of the LS, and 468 * continue on with terminating the association. If the fc-nvme host 469 * is present and receives the LS, it too can tear down. 470 */ 471 static void 472 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) 473 { 474 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 475 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 476 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 477 struct nvmet_fc_ls_req_op *lsop; 478 struct nvmefc_ls_req *lsreq; 479 int ret; 480 481 /* 482 * If ls_req is NULL or no hosthandle, it's an older lldd and no 483 * message is normal. Otherwise, send unless the hostport has 484 * already been invalidated by the lldd. 485 */ 486 if (!tgtport->ops->ls_req || assoc->hostport->invalid) 487 return; 488 489 lsop = kzalloc((sizeof(*lsop) + 490 sizeof(*discon_rqst) + sizeof(*discon_acc) + 491 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); 492 if (!lsop) { 493 dev_info(tgtport->dev, 494 "{%d:%d} send Disconnect Association failed: ENOMEM\n", 495 tgtport->fc_target_port.port_num, assoc->a_id); 496 return; 497 } 498 499 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 500 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 501 lsreq = &lsop->ls_req; 502 if (tgtport->ops->lsrqst_priv_sz) 503 lsreq->private = (void *)&discon_acc[1]; 504 else 505 lsreq->private = NULL; 506 507 lsop->tgtport = tgtport; 508 lsop->hosthandle = assoc->hostport->hosthandle; 509 510 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 511 assoc->association_id); 512 513 ret = nvmet_fc_send_ls_req_async(tgtport, lsop, 514 nvmet_fc_disconnect_assoc_done); 515 if (ret) { 516 dev_info(tgtport->dev, 517 "{%d:%d} XMT Disconnect Association failed: %d\n", 518 tgtport->fc_target_port.port_num, assoc->a_id, ret); 519 kfree(lsop); 520 } 521 } 522 523 524 /* *********************** FC-NVME Port Management ************************ */ 525 526 527 static int 528 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 529 { 530 struct nvmet_fc_ls_iod *iod; 531 int i; 532 533 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod), 534 GFP_KERNEL); 535 if (!iod) 536 return -ENOMEM; 537 538 tgtport->iod = iod; 539 540 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 541 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); 542 iod->tgtport = tgtport; 543 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 544 545 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + 546 sizeof(union nvmefc_ls_responses), 547 GFP_KERNEL); 548 if (!iod->rqstbuf) 549 goto out_fail; 550 551 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; 552 553 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, 554 sizeof(*iod->rspbuf), 555 DMA_TO_DEVICE); 556 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) 557 goto out_fail; 558 } 559 560 return 0; 561 562 out_fail: 563 kfree(iod->rqstbuf); 564 list_del(&iod->ls_rcv_list); 565 for (iod--, i--; i >= 0; iod--, i--) { 566 fc_dma_unmap_single(tgtport->dev, iod->rspdma, 567 
sizeof(*iod->rspbuf), DMA_TO_DEVICE); 568 kfree(iod->rqstbuf); 569 list_del(&iod->ls_rcv_list); 570 } 571 572 kfree(iod); 573 574 return -EFAULT; 575 } 576 577 static void 578 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) 579 { 580 struct nvmet_fc_ls_iod *iod = tgtport->iod; 581 int i; 582 583 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { 584 fc_dma_unmap_single(tgtport->dev, 585 iod->rspdma, sizeof(*iod->rspbuf), 586 DMA_TO_DEVICE); 587 kfree(iod->rqstbuf); 588 list_del(&iod->ls_rcv_list); 589 } 590 kfree(tgtport->iod); 591 } 592 593 static struct nvmet_fc_ls_iod * 594 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 595 { 596 struct nvmet_fc_ls_iod *iod; 597 unsigned long flags; 598 599 spin_lock_irqsave(&tgtport->lock, flags); 600 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, 601 struct nvmet_fc_ls_iod, ls_rcv_list); 602 if (iod) 603 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); 604 spin_unlock_irqrestore(&tgtport->lock, flags); 605 return iod; 606 } 607 608 609 static void 610 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, 611 struct nvmet_fc_ls_iod *iod) 612 { 613 unsigned long flags; 614 615 spin_lock_irqsave(&tgtport->lock, flags); 616 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); 617 spin_unlock_irqrestore(&tgtport->lock, flags); 618 } 619 620 static void 621 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 622 struct nvmet_fc_tgt_queue *queue) 623 { 624 struct nvmet_fc_fcp_iod *fod = queue->fod; 625 int i; 626 627 for (i = 0; i < queue->sqsize; fod++, i++) { 628 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); 629 fod->tgtport = tgtport; 630 fod->queue = queue; 631 fod->active = false; 632 fod->abort = false; 633 fod->aborted = false; 634 fod->fcpreq = NULL; 635 list_add_tail(&fod->fcp_list, &queue->fod_list); 636 spin_lock_init(&fod->flock); 637 638 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, 639 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 640 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { 641 list_del(&fod->fcp_list); 642 for (fod--, i--; i >= 0; fod--, i--) { 643 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 644 sizeof(fod->rspiubuf), 645 DMA_TO_DEVICE); 646 fod->rspdma = 0L; 647 list_del(&fod->fcp_list); 648 } 649 650 return; 651 } 652 } 653 } 654 655 static void 656 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, 657 struct nvmet_fc_tgt_queue *queue) 658 { 659 struct nvmet_fc_fcp_iod *fod = queue->fod; 660 int i; 661 662 for (i = 0; i < queue->sqsize; fod++, i++) { 663 if (fod->rspdma) 664 fc_dma_unmap_single(tgtport->dev, fod->rspdma, 665 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 666 } 667 } 668 669 static struct nvmet_fc_fcp_iod * 670 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 671 { 672 struct nvmet_fc_fcp_iod *fod; 673 674 lockdep_assert_held(&queue->qlock); 675 676 fod = list_first_entry_or_null(&queue->fod_list, 677 struct nvmet_fc_fcp_iod, fcp_list); 678 if (fod) { 679 list_del(&fod->fcp_list); 680 fod->active = true; 681 /* 682 * no queue reference is taken, as it was taken by the 683 * queue lookup just prior to the allocation. The iod 684 * will "inherit" that reference. 685 */ 686 } 687 return fod; 688 } 689 690 691 static void 692 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 693 struct nvmet_fc_tgt_queue *queue, 694 struct nvmefc_tgt_fcp_req *fcpreq) 695 { 696 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 697 698 /* 699 * put all admin cmds on hw queue id 0. 
All io commands go to 700 * the respective hw queue based on a modulo basis 701 */ 702 fcpreq->hwqid = queue->qid ? 703 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 704 705 nvmet_fc_handle_fcp_rqst(tgtport, fod); 706 } 707 708 static void 709 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) 710 { 711 struct nvmet_fc_fcp_iod *fod = 712 container_of(work, struct nvmet_fc_fcp_iod, defer_work); 713 714 /* Submit deferred IO for processing */ 715 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); 716 717 } 718 719 static void 720 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 721 struct nvmet_fc_fcp_iod *fod) 722 { 723 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 724 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 725 struct nvmet_fc_defer_fcp_req *deferfcp; 726 unsigned long flags; 727 728 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 729 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 730 731 fcpreq->nvmet_fc_private = NULL; 732 733 fod->active = false; 734 fod->abort = false; 735 fod->aborted = false; 736 fod->writedataactive = false; 737 fod->fcpreq = NULL; 738 739 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 740 741 /* release the queue lookup reference on the completed IO */ 742 nvmet_fc_tgt_q_put(queue); 743 744 spin_lock_irqsave(&queue->qlock, flags); 745 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 746 struct nvmet_fc_defer_fcp_req, req_list); 747 if (!deferfcp) { 748 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 749 spin_unlock_irqrestore(&queue->qlock, flags); 750 return; 751 } 752 753 /* Re-use the fod for the next pending cmd that was deferred */ 754 list_del(&deferfcp->req_list); 755 756 fcpreq = deferfcp->fcp_req; 757 758 /* deferfcp can be reused for another IO at a later date */ 759 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 760 761 spin_unlock_irqrestore(&queue->qlock, flags); 762 763 /* Save NVME CMD IO in fod */ 764 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 765 766 /* Setup new fcpreq to be processed */ 767 fcpreq->rspaddr = NULL; 768 fcpreq->rsplen = 0; 769 fcpreq->nvmet_fc_private = fod; 770 fod->fcpreq = fcpreq; 771 fod->active = true; 772 773 /* inform LLDD IO is now being processed */ 774 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 775 776 /* 777 * Leave the queue lookup get reference taken when 778 * fod was originally allocated. 
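	 * The deferred command is resubmitted via the queue_work() below:
	 * nvmet_fc_fcp_rqst_op_defer_work() calls nvmet_fc_queue_fcp_req(),
	 * which selects the hw queue and hands the command to
	 * nvmet_fc_handle_fcp_rqst() for processing.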
779 */ 780 781 queue_work(queue->work_q, &fod->defer_work); 782 } 783 784 static struct nvmet_fc_tgt_queue * 785 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, 786 u16 qid, u16 sqsize) 787 { 788 struct nvmet_fc_tgt_queue *queue; 789 int ret; 790 791 if (qid > NVMET_NR_QUEUES) 792 return NULL; 793 794 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); 795 if (!queue) 796 return NULL; 797 798 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 799 assoc->tgtport->fc_target_port.port_num, 800 assoc->a_id, qid); 801 if (!queue->work_q) 802 goto out_free_queue; 803 804 queue->qid = qid; 805 queue->sqsize = sqsize; 806 queue->assoc = assoc; 807 INIT_LIST_HEAD(&queue->fod_list); 808 INIT_LIST_HEAD(&queue->avail_defer_list); 809 INIT_LIST_HEAD(&queue->pending_cmd_list); 810 atomic_set(&queue->connected, 0); 811 atomic_set(&queue->sqtail, 0); 812 atomic_set(&queue->rsn, 1); 813 atomic_set(&queue->zrspcnt, 0); 814 spin_lock_init(&queue->qlock); 815 kref_init(&queue->ref); 816 817 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); 818 819 nvmet_cq_init(&queue->nvme_cq); 820 ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); 821 if (ret) 822 goto out_fail_iodlist; 823 824 WARN_ON(assoc->queues[qid]); 825 assoc->queues[qid] = queue; 826 827 return queue; 828 829 out_fail_iodlist: 830 nvmet_cq_put(&queue->nvme_cq); 831 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); 832 destroy_workqueue(queue->work_q); 833 out_free_queue: 834 kfree(queue); 835 return NULL; 836 } 837 838 839 static void 840 nvmet_fc_tgt_queue_free(struct kref *ref) 841 { 842 struct nvmet_fc_tgt_queue *queue = 843 container_of(ref, struct nvmet_fc_tgt_queue, ref); 844 845 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); 846 847 destroy_workqueue(queue->work_q); 848 849 kfree(queue); 850 } 851 852 static void 853 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue) 854 { 855 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); 856 } 857 858 static int 859 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue) 860 { 861 return kref_get_unless_zero(&queue->ref); 862 } 863 864 865 static void 866 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) 867 { 868 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 869 struct nvmet_fc_fcp_iod *fod = queue->fod; 870 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; 871 unsigned long flags; 872 int i; 873 bool disconnect; 874 875 disconnect = atomic_xchg(&queue->connected, 0); 876 877 /* if not connected, nothing to do */ 878 if (!disconnect) 879 return; 880 881 spin_lock_irqsave(&queue->qlock, flags); 882 /* abort outstanding io's */ 883 for (i = 0; i < queue->sqsize; fod++, i++) { 884 if (fod->active) { 885 spin_lock(&fod->flock); 886 fod->abort = true; 887 /* 888 * only call lldd abort routine if waiting for 889 * writedata. other outstanding ops should finish 890 * on their own. 
891 */ 892 if (fod->writedataactive) { 893 fod->aborted = true; 894 spin_unlock(&fod->flock); 895 tgtport->ops->fcp_abort( 896 &tgtport->fc_target_port, fod->fcpreq); 897 } else 898 spin_unlock(&fod->flock); 899 } 900 } 901 902 /* Cleanup defer'ed IOs in queue */ 903 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, 904 req_list) { 905 list_del(&deferfcp->req_list); 906 kfree(deferfcp); 907 } 908 909 for (;;) { 910 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 911 struct nvmet_fc_defer_fcp_req, req_list); 912 if (!deferfcp) 913 break; 914 915 list_del(&deferfcp->req_list); 916 spin_unlock_irqrestore(&queue->qlock, flags); 917 918 tgtport->ops->defer_rcv(&tgtport->fc_target_port, 919 deferfcp->fcp_req); 920 921 tgtport->ops->fcp_abort(&tgtport->fc_target_port, 922 deferfcp->fcp_req); 923 924 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 925 deferfcp->fcp_req); 926 927 /* release the queue lookup reference */ 928 nvmet_fc_tgt_q_put(queue); 929 930 kfree(deferfcp); 931 932 spin_lock_irqsave(&queue->qlock, flags); 933 } 934 spin_unlock_irqrestore(&queue->qlock, flags); 935 936 flush_workqueue(queue->work_q); 937 938 nvmet_sq_destroy(&queue->nvme_sq); 939 nvmet_cq_put(&queue->nvme_cq); 940 941 nvmet_fc_tgt_q_put(queue); 942 } 943 944 static struct nvmet_fc_tgt_queue * 945 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, 946 u64 connection_id) 947 { 948 struct nvmet_fc_tgt_assoc *assoc; 949 struct nvmet_fc_tgt_queue *queue; 950 u64 association_id = nvmet_fc_getassociationid(connection_id); 951 u16 qid = nvmet_fc_getqueueid(connection_id); 952 953 if (qid > NVMET_NR_QUEUES) 954 return NULL; 955 956 rcu_read_lock(); 957 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 958 if (association_id == assoc->association_id) { 959 queue = assoc->queues[qid]; 960 if (queue && 961 (!atomic_read(&queue->connected) || 962 !nvmet_fc_tgt_q_get(queue))) 963 queue = NULL; 964 rcu_read_unlock(); 965 return queue; 966 } 967 } 968 rcu_read_unlock(); 969 return NULL; 970 } 971 972 static void 973 nvmet_fc_hostport_free(struct kref *ref) 974 { 975 struct nvmet_fc_hostport *hostport = 976 container_of(ref, struct nvmet_fc_hostport, ref); 977 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; 978 unsigned long flags; 979 980 spin_lock_irqsave(&tgtport->lock, flags); 981 list_del(&hostport->host_list); 982 spin_unlock_irqrestore(&tgtport->lock, flags); 983 if (tgtport->ops->host_release && hostport->invalid) 984 tgtport->ops->host_release(hostport->hosthandle); 985 kfree(hostport); 986 nvmet_fc_tgtport_put(tgtport); 987 } 988 989 static void 990 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) 991 { 992 kref_put(&hostport->ref, nvmet_fc_hostport_free); 993 } 994 995 static int 996 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) 997 { 998 return kref_get_unless_zero(&hostport->ref); 999 } 1000 1001 static struct nvmet_fc_hostport * 1002 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1003 { 1004 struct nvmet_fc_hostport *host; 1005 1006 lockdep_assert_held(&tgtport->lock); 1007 1008 list_for_each_entry(host, &tgtport->host_list, host_list) { 1009 if (host->hosthandle == hosthandle && !host->invalid) { 1010 if (nvmet_fc_hostport_get(host)) 1011 return host; 1012 } 1013 } 1014 1015 return NULL; 1016 } 1017 1018 static struct nvmet_fc_hostport * 1019 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1020 { 1021 struct nvmet_fc_hostport *newhost, *match = NULL; 1022 unsigned long 
flags; 1023 1024 /* 1025 * Caller holds a reference on tgtport. 1026 */ 1027 1028 /* if LLDD not implemented, leave as NULL */ 1029 if (!hosthandle) 1030 return NULL; 1031 1032 spin_lock_irqsave(&tgtport->lock, flags); 1033 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1034 spin_unlock_irqrestore(&tgtport->lock, flags); 1035 1036 if (match) 1037 return match; 1038 1039 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); 1040 if (!newhost) 1041 return ERR_PTR(-ENOMEM); 1042 1043 spin_lock_irqsave(&tgtport->lock, flags); 1044 match = nvmet_fc_match_hostport(tgtport, hosthandle); 1045 if (match) { 1046 /* new allocation not needed */ 1047 kfree(newhost); 1048 newhost = match; 1049 } else { 1050 nvmet_fc_tgtport_get(tgtport); 1051 newhost->tgtport = tgtport; 1052 newhost->hosthandle = hosthandle; 1053 INIT_LIST_HEAD(&newhost->host_list); 1054 kref_init(&newhost->ref); 1055 1056 list_add_tail(&newhost->host_list, &tgtport->host_list); 1057 } 1058 spin_unlock_irqrestore(&tgtport->lock, flags); 1059 1060 return newhost; 1061 } 1062 1063 static void 1064 nvmet_fc_delete_assoc_work(struct work_struct *work) 1065 { 1066 struct nvmet_fc_tgt_assoc *assoc = 1067 container_of(work, struct nvmet_fc_tgt_assoc, del_work); 1068 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1069 1070 nvmet_fc_delete_target_assoc(assoc); 1071 nvmet_fc_tgt_a_put(assoc); 1072 nvmet_fc_tgtport_put(tgtport); 1073 } 1074 1075 static void 1076 nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) 1077 { 1078 int terminating; 1079 1080 terminating = atomic_xchg(&assoc->terminating, 1); 1081 1082 /* if already terminating, do nothing */ 1083 if (terminating) 1084 return; 1085 1086 nvmet_fc_tgtport_get(assoc->tgtport); 1087 if (!queue_work(nvmet_wq, &assoc->del_work)) 1088 nvmet_fc_tgtport_put(assoc->tgtport); 1089 } 1090 1091 static bool 1092 nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id) 1093 { 1094 struct nvmet_fc_tgt_assoc *a; 1095 bool found = false; 1096 1097 rcu_read_lock(); 1098 list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { 1099 if (association_id == a->association_id) { 1100 found = true; 1101 break; 1102 } 1103 } 1104 rcu_read_unlock(); 1105 1106 return found; 1107 } 1108 1109 static struct nvmet_fc_tgt_assoc * 1110 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) 1111 { 1112 struct nvmet_fc_tgt_assoc *assoc; 1113 unsigned long flags; 1114 bool done; 1115 u64 ran; 1116 int idx; 1117 1118 if (!tgtport->pe) 1119 return NULL; 1120 1121 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); 1122 if (!assoc) 1123 return NULL; 1124 1125 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); 1126 if (idx < 0) 1127 goto out_free_assoc; 1128 1129 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); 1130 if (IS_ERR(assoc->hostport)) 1131 goto out_ida; 1132 1133 assoc->tgtport = tgtport; 1134 nvmet_fc_tgtport_get(tgtport); 1135 assoc->a_id = idx; 1136 INIT_LIST_HEAD(&assoc->a_list); 1137 kref_init(&assoc->ref); 1138 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); 1139 atomic_set(&assoc->terminating, 0); 1140 1141 done = false; 1142 do { 1143 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); 1144 ran = ran << BYTES_FOR_QID_SHIFT; 1145 1146 spin_lock_irqsave(&tgtport->lock, flags); 1147 if (!nvmet_fc_assoc_exists(tgtport, ran)) { 1148 assoc->association_id = ran; 1149 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); 1150 done = true; 1151 } 1152 spin_unlock_irqrestore(&tgtport->lock, flags); 1153 } while (!done); 1154 1155 return 
assoc; 1156 1157 out_ida: 1158 ida_free(&tgtport->assoc_cnt, idx); 1159 out_free_assoc: 1160 kfree(assoc); 1161 return NULL; 1162 } 1163 1164 static void 1165 nvmet_fc_target_assoc_free(struct kref *ref) 1166 { 1167 struct nvmet_fc_tgt_assoc *assoc = 1168 container_of(ref, struct nvmet_fc_tgt_assoc, ref); 1169 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1170 struct nvmet_fc_ls_iod *oldls; 1171 unsigned long flags; 1172 int i; 1173 1174 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1175 if (assoc->queues[i]) 1176 nvmet_fc_delete_target_queue(assoc->queues[i]); 1177 } 1178 1179 /* Send Disconnect now that all i/o has completed */ 1180 nvmet_fc_xmt_disconnect_assoc(assoc); 1181 1182 nvmet_fc_hostport_put(assoc->hostport); 1183 spin_lock_irqsave(&tgtport->lock, flags); 1184 oldls = assoc->rcv_disconn; 1185 spin_unlock_irqrestore(&tgtport->lock, flags); 1186 /* if pending Rcv Disconnect Association LS, send rsp now */ 1187 if (oldls) 1188 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1189 ida_free(&tgtport->assoc_cnt, assoc->a_id); 1190 dev_info(tgtport->dev, 1191 "{%d:%d} Association freed\n", 1192 tgtport->fc_target_port.port_num, assoc->a_id); 1193 kfree(assoc); 1194 } 1195 1196 static void 1197 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc) 1198 { 1199 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); 1200 } 1201 1202 static int 1203 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc) 1204 { 1205 return kref_get_unless_zero(&assoc->ref); 1206 } 1207 1208 static void 1209 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) 1210 { 1211 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; 1212 unsigned long flags; 1213 int i; 1214 1215 spin_lock_irqsave(&tgtport->lock, flags); 1216 list_del_rcu(&assoc->a_list); 1217 spin_unlock_irqrestore(&tgtport->lock, flags); 1218 1219 synchronize_rcu(); 1220 1221 /* ensure all in-flight I/Os have been processed */ 1222 for (i = NVMET_NR_QUEUES; i >= 0; i--) { 1223 if (assoc->queues[i]) 1224 flush_workqueue(assoc->queues[i]->work_q); 1225 } 1226 1227 dev_info(tgtport->dev, 1228 "{%d:%d} Association deleted\n", 1229 tgtport->fc_target_port.port_num, assoc->a_id); 1230 1231 nvmet_fc_tgtport_put(tgtport); 1232 } 1233 1234 static struct nvmet_fc_tgt_assoc * 1235 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport, 1236 u64 association_id) 1237 { 1238 struct nvmet_fc_tgt_assoc *assoc; 1239 struct nvmet_fc_tgt_assoc *ret = NULL; 1240 1241 rcu_read_lock(); 1242 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1243 if (association_id == assoc->association_id) { 1244 ret = assoc; 1245 if (!nvmet_fc_tgt_a_get(assoc)) 1246 ret = NULL; 1247 break; 1248 } 1249 } 1250 rcu_read_unlock(); 1251 1252 return ret; 1253 } 1254 1255 static void 1256 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, 1257 struct nvmet_fc_port_entry *pe, 1258 struct nvmet_port *port) 1259 { 1260 lockdep_assert_held(&nvmet_fc_tgtlock); 1261 1262 nvmet_fc_tgtport_get(tgtport); 1263 pe->tgtport = tgtport; 1264 tgtport->pe = pe; 1265 1266 pe->port = port; 1267 port->priv = pe; 1268 1269 pe->node_name = tgtport->fc_target_port.node_name; 1270 pe->port_name = tgtport->fc_target_port.port_name; 1271 INIT_LIST_HEAD(&pe->pe_list); 1272 1273 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); 1274 } 1275 1276 static void 1277 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) 1278 { 1279 unsigned long flags; 1280 1281 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1282 if (pe->tgtport) { 1283 nvmet_fc_tgtport_put(pe->tgtport); 1284 pe->tgtport->pe = NULL; 1285 
	}
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * Called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe) {
		nvmet_fc_tgtport_put(pe->tgtport);
		pe->tgtport = NULL;
	}
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * Called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's WWNs (i.e. the targetport existed,
 * nvmet was configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			if (!nvmet_fc_tgtport_get(tgtport))
				continue;

			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                                LLDD to register the existence of a local
 *                                NVME subsystem FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a target port pointer. Upon success, the routine
 *            will allocate a nvmet_fc_target_port structure and place its
 *            address in the target port pointer. Upon failure, the target
 *            port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
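 *
 * Example (illustrative sketch only; the lldd_* handlers, wwnn/wwpn,
 * nport_id and pdev below are hypothetical, not part of this API):
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= wwnn,
 *		.port_name	= wwpn,
 *		.port_id	= nport_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &pdev->dev, &targetport);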
1357 */ 1358 int 1359 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, 1360 struct nvmet_fc_target_template *template, 1361 struct device *dev, 1362 struct nvmet_fc_target_port **portptr) 1363 { 1364 struct nvmet_fc_tgtport *newrec; 1365 unsigned long flags; 1366 int ret, idx; 1367 1368 if (!template->xmt_ls_rsp || !template->fcp_op || 1369 !template->fcp_abort || 1370 !template->fcp_req_release || !template->targetport_delete || 1371 !template->max_hw_queues || !template->max_sgl_segments || 1372 !template->max_dif_sgl_segments || !template->dma_boundary) { 1373 ret = -EINVAL; 1374 goto out_regtgt_failed; 1375 } 1376 1377 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), 1378 GFP_KERNEL); 1379 if (!newrec) { 1380 ret = -ENOMEM; 1381 goto out_regtgt_failed; 1382 } 1383 1384 idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL); 1385 if (idx < 0) { 1386 ret = -ENOSPC; 1387 goto out_fail_kfree; 1388 } 1389 1390 if (!get_device(dev) && dev) { 1391 ret = -ENODEV; 1392 goto out_ida_put; 1393 } 1394 1395 newrec->fc_target_port.node_name = pinfo->node_name; 1396 newrec->fc_target_port.port_name = pinfo->port_name; 1397 if (template->target_priv_sz) 1398 newrec->fc_target_port.private = &newrec[1]; 1399 else 1400 newrec->fc_target_port.private = NULL; 1401 newrec->fc_target_port.port_id = pinfo->port_id; 1402 newrec->fc_target_port.port_num = idx; 1403 INIT_LIST_HEAD(&newrec->tgt_list); 1404 newrec->dev = dev; 1405 newrec->ops = template; 1406 spin_lock_init(&newrec->lock); 1407 INIT_LIST_HEAD(&newrec->ls_rcv_list); 1408 INIT_LIST_HEAD(&newrec->ls_req_list); 1409 INIT_LIST_HEAD(&newrec->ls_busylist); 1410 INIT_LIST_HEAD(&newrec->assoc_list); 1411 INIT_LIST_HEAD(&newrec->host_list); 1412 kref_init(&newrec->ref); 1413 ida_init(&newrec->assoc_cnt); 1414 newrec->max_sg_cnt = template->max_sgl_segments; 1415 1416 ret = nvmet_fc_alloc_ls_iodlist(newrec); 1417 if (ret) { 1418 ret = -ENOMEM; 1419 goto out_free_newrec; 1420 } 1421 1422 nvmet_fc_portentry_rebind_tgt(newrec); 1423 1424 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1425 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); 1426 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1427 1428 *portptr = &newrec->fc_target_port; 1429 return 0; 1430 1431 out_free_newrec: 1432 put_device(dev); 1433 out_ida_put: 1434 ida_free(&nvmet_fc_tgtport_cnt, idx); 1435 out_fail_kfree: 1436 kfree(newrec); 1437 out_regtgt_failed: 1438 *portptr = NULL; 1439 return ret; 1440 } 1441 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport); 1442 1443 1444 static void 1445 nvmet_fc_free_tgtport(struct kref *ref) 1446 { 1447 struct nvmet_fc_tgtport *tgtport = 1448 container_of(ref, struct nvmet_fc_tgtport, ref); 1449 struct device *dev = tgtport->dev; 1450 1451 nvmet_fc_free_ls_iodlist(tgtport); 1452 1453 /* let the LLDD know we've finished tearing it down */ 1454 tgtport->ops->targetport_delete(&tgtport->fc_target_port); 1455 1456 ida_free(&nvmet_fc_tgtport_cnt, 1457 tgtport->fc_target_port.port_num); 1458 1459 ida_destroy(&tgtport->assoc_cnt); 1460 1461 kfree(tgtport); 1462 1463 put_device(dev); 1464 } 1465 1466 static void 1467 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport) 1468 { 1469 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); 1470 } 1471 1472 static int 1473 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport) 1474 { 1475 return kref_get_unless_zero(&tgtport->ref); 1476 } 1477 1478 static void 1479 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport) 1480 { 1481 struct nvmet_fc_tgt_assoc *assoc; 1482 1483 rcu_read_lock(); 1484 
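	/* take a reference on each association before scheduling its deletion */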
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	rcu_read_unlock();
}

/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                            to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL). The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *               LS was received on and which supplied the transport the
 *               hosthandle.
 * @hosthandle:  the handle (pointer) that represents the host port
 *               that no longer has connectivity and that LS's should
 *               no longer be directed to.
1521 */ 1522 void 1523 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, 1524 void *hosthandle) 1525 { 1526 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1527 struct nvmet_fc_tgt_assoc *assoc, *next; 1528 unsigned long flags; 1529 bool noassoc = true; 1530 1531 spin_lock_irqsave(&tgtport->lock, flags); 1532 list_for_each_entry_safe(assoc, next, 1533 &tgtport->assoc_list, a_list) { 1534 if (assoc->hostport->hosthandle != hosthandle) 1535 continue; 1536 if (!nvmet_fc_tgt_a_get(assoc)) 1537 continue; 1538 assoc->hostport->invalid = 1; 1539 noassoc = false; 1540 nvmet_fc_schedule_delete_assoc(assoc); 1541 nvmet_fc_tgt_a_put(assoc); 1542 } 1543 spin_unlock_irqrestore(&tgtport->lock, flags); 1544 1545 /* if there's nothing to wait for - call the callback */ 1546 if (noassoc && tgtport->ops->host_release) 1547 tgtport->ops->host_release(hosthandle); 1548 } 1549 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); 1550 1551 /* 1552 * nvmet layer has called to terminate an association 1553 */ 1554 static void 1555 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) 1556 { 1557 struct nvmet_fc_tgtport *tgtport, *next; 1558 struct nvmet_fc_tgt_assoc *assoc; 1559 struct nvmet_fc_tgt_queue *queue; 1560 unsigned long flags; 1561 bool found_ctrl = false; 1562 1563 /* this is a bit ugly, but don't want to make locks layered */ 1564 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1565 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list, 1566 tgt_list) { 1567 if (!nvmet_fc_tgtport_get(tgtport)) 1568 continue; 1569 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1570 1571 rcu_read_lock(); 1572 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { 1573 queue = assoc->queues[0]; 1574 if (queue && queue->nvme_sq.ctrl == ctrl) { 1575 if (nvmet_fc_tgt_a_get(assoc)) 1576 found_ctrl = true; 1577 break; 1578 } 1579 } 1580 rcu_read_unlock(); 1581 1582 nvmet_fc_tgtport_put(tgtport); 1583 1584 if (found_ctrl) { 1585 nvmet_fc_schedule_delete_assoc(assoc); 1586 nvmet_fc_tgt_a_put(assoc); 1587 return; 1588 } 1589 1590 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1591 } 1592 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1593 } 1594 1595 static void 1596 nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport) 1597 { 1598 struct nvmet_fc_ls_req_op *lsop; 1599 struct nvmefc_ls_req *lsreq; 1600 struct nvmet_fc_ls_iod *iod; 1601 int i; 1602 1603 iod = tgtport->iod; 1604 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) 1605 cancel_work(&iod->work); 1606 1607 /* 1608 * After this point the connection is lost and thus any pending 1609 * request can't be processed by the normal completion path. This 1610 * is likely a request from nvmet_fc_send_ls_req_async. 1611 */ 1612 while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, 1613 struct nvmet_fc_ls_req_op, lsreq_list))) { 1614 list_del(&lsop->lsreq_list); 1615 1616 if (!lsop->req_queued) 1617 continue; 1618 1619 lsreq = &lsop->ls_req; 1620 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, 1621 (lsreq->rqstlen + lsreq->rsplen), 1622 DMA_BIDIRECTIONAL); 1623 nvmet_fc_tgtport_put(tgtport); 1624 kfree(lsop); 1625 } 1626 } 1627 1628 /** 1629 * nvmet_fc_unregister_targetport - transport entry point called by an 1630 * LLDD to deregister/remove a previously 1631 * registered a local NVME subsystem FC port. 1632 * @target_port: pointer to the (registered) target port that is to be 1633 * deregistered. 1634 * 1635 * Returns: 1636 * a completion status. Must be 0 upon success; a negative errno 1637 * (ex: -ENXIO) upon failure. 
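 *
 * Teardown order (see body below): the targetport is removed from
 * nvmet_fc_target_list, any port entry is unbound, all associations are
 * scheduled for deletion, nvmet_wq is flushed, pending LS requests are
 * released, and the registration reference is dropped so the port is
 * freed once the remaining references go away.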
1638 */ 1639 int 1640 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) 1641 { 1642 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 1643 unsigned long flags; 1644 1645 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 1646 list_del(&tgtport->tgt_list); 1647 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 1648 1649 nvmet_fc_portentry_unbind_tgt(tgtport); 1650 1651 /* terminate any outstanding associations */ 1652 __nvmet_fc_free_assocs(tgtport); 1653 1654 flush_workqueue(nvmet_wq); 1655 1656 nvmet_fc_free_pending_reqs(tgtport); 1657 nvmet_fc_tgtport_put(tgtport); 1658 1659 return 0; 1660 } 1661 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); 1662 1663 1664 /* ********************** FC-NVME LS RCV Handling ************************* */ 1665 1666 1667 static void 1668 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, 1669 struct nvmet_fc_ls_iod *iod) 1670 { 1671 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; 1672 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; 1673 struct nvmet_fc_tgt_queue *queue; 1674 int ret = 0; 1675 1676 memset(acc, 0, sizeof(*acc)); 1677 1678 /* 1679 * FC-NVME spec changes. There are initiators sending different 1680 * lengths as padding sizes for Create Association Cmd descriptor 1681 * was incorrect. 1682 * Accept anything of "minimum" length. Assume format per 1.15 1683 * spec (with HOSTID reduced to 16 bytes), ignore how long the 1684 * trailing pad length is. 1685 */ 1686 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1687 ret = VERR_CR_ASSOC_LEN; 1688 else if (be32_to_cpu(rqst->desc_list_len) < 1689 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) 1690 ret = VERR_CR_ASSOC_RQST_LEN; 1691 else if (rqst->assoc_cmd.desc_tag != 1692 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1693 ret = VERR_CR_ASSOC_CMD; 1694 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < 1695 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) 1696 ret = VERR_CR_ASSOC_CMD_LEN; 1697 else if (!rqst->assoc_cmd.ersp_ratio || 1698 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1699 be16_to_cpu(rqst->assoc_cmd.sqsize))) 1700 ret = VERR_ERSP_RATIO; 1701 1702 else { 1703 /* new association w/ admin queue */ 1704 iod->assoc = nvmet_fc_alloc_target_assoc( 1705 tgtport, iod->hosthandle); 1706 if (!iod->assoc) 1707 ret = VERR_ASSOC_ALLOC_FAIL; 1708 else { 1709 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, 1710 be16_to_cpu(rqst->assoc_cmd.sqsize)); 1711 if (!queue) { 1712 ret = VERR_QUEUE_ALLOC_FAIL; 1713 nvmet_fc_tgt_a_put(iod->assoc); 1714 } 1715 } 1716 } 1717 1718 if (ret) { 1719 dev_err(tgtport->dev, 1720 "Create Association LS failed: %s\n", 1721 validation_errors[ret]); 1722 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1723 sizeof(*acc), rqst->w0.ls_cmd, 1724 FCNVME_RJT_RC_LOGIC, 1725 FCNVME_RJT_EXP_NONE, 0); 1726 return; 1727 } 1728 1729 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); 1730 atomic_set(&queue->connected, 1); 1731 queue->sqhd = 0; /* best place to init value */ 1732 1733 dev_info(tgtport->dev, 1734 "{%d:%d} Association created\n", 1735 tgtport->fc_target_port.port_num, iod->assoc->a_id); 1736 1737 /* format a response */ 1738 1739 iod->lsrsp->rsplen = sizeof(*acc); 1740 1741 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1742 fcnvme_lsdesc_len( 1743 sizeof(struct fcnvme_ls_cr_assoc_acc)), 1744 FCNVME_LS_CREATE_ASSOCIATION); 1745 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1746 acc->associd.desc_len = 1747 fcnvme_lsdesc_len( 1748 sizeof(struct fcnvme_lsdesc_assoc_id)); 1749 
acc->associd.association_id = 1750 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); 1751 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); 1752 acc->connectid.desc_len = 1753 fcnvme_lsdesc_len( 1754 sizeof(struct fcnvme_lsdesc_conn_id)); 1755 acc->connectid.connection_id = acc->associd.association_id; 1756 } 1757 1758 static void 1759 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, 1760 struct nvmet_fc_ls_iod *iod) 1761 { 1762 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; 1763 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; 1764 struct nvmet_fc_tgt_queue *queue; 1765 int ret = 0; 1766 1767 memset(acc, 0, sizeof(*acc)); 1768 1769 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) 1770 ret = VERR_CR_CONN_LEN; 1771 else if (rqst->desc_list_len != 1772 fcnvme_lsdesc_len( 1773 sizeof(struct fcnvme_ls_cr_conn_rqst))) 1774 ret = VERR_CR_CONN_RQST_LEN; 1775 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1776 ret = VERR_ASSOC_ID; 1777 else if (rqst->associd.desc_len != 1778 fcnvme_lsdesc_len( 1779 sizeof(struct fcnvme_lsdesc_assoc_id))) 1780 ret = VERR_ASSOC_ID_LEN; 1781 else if (rqst->connect_cmd.desc_tag != 1782 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD)) 1783 ret = VERR_CR_CONN_CMD; 1784 else if (rqst->connect_cmd.desc_len != 1785 fcnvme_lsdesc_len( 1786 sizeof(struct fcnvme_lsdesc_cr_conn_cmd))) 1787 ret = VERR_CR_CONN_CMD_LEN; 1788 else if (!rqst->connect_cmd.ersp_ratio || 1789 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= 1790 be16_to_cpu(rqst->connect_cmd.sqsize))) 1791 ret = VERR_ERSP_RATIO; 1792 1793 else { 1794 /* new io queue */ 1795 iod->assoc = nvmet_fc_find_target_assoc(tgtport, 1796 be64_to_cpu(rqst->associd.association_id)); 1797 if (!iod->assoc) 1798 ret = VERR_NO_ASSOC; 1799 else { 1800 queue = nvmet_fc_alloc_target_queue(iod->assoc, 1801 be16_to_cpu(rqst->connect_cmd.qid), 1802 be16_to_cpu(rqst->connect_cmd.sqsize)); 1803 if (!queue) 1804 ret = VERR_QUEUE_ALLOC_FAIL; 1805 1806 /* release get taken in nvmet_fc_find_target_assoc */ 1807 nvmet_fc_tgt_a_put(iod->assoc); 1808 } 1809 } 1810 1811 if (ret) { 1812 dev_err(tgtport->dev, 1813 "Create Connection LS failed: %s\n", 1814 validation_errors[ret]); 1815 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1816 sizeof(*acc), rqst->w0.ls_cmd, 1817 (ret == VERR_NO_ASSOC) ? 
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

/*
 * Returns true if the LS response is to be transmitted
 * Returns false if the LS response is to be delayed
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&iod->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&iod->rspbuf->rsp_dis_assoc;
	struct nvmet_fc_tgt_assoc *assoc = NULL;
	struct nvmet_fc_ls_iod *oldls = NULL;
	unsigned long flags;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association - takes an assoc ref if !NULL */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret || !assoc) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * The rules for LS responses say the response cannot
	 * go back until ABTS's have been sent for all outstanding
	 * I/O and a Disconnect Association LS has been sent.
	 * So... save off the Disconnect LS to send the response
	 * later. If there was a prior LS already saved, replace
	 * it with the newer one and send a can't perform reject
	 * on the older one.
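	 * The saved iod (assoc->rcv_disconn) is picked up later in
	 * nvmet_fc_target_assoc_free(), which transmits the delayed response
	 * via nvmet_fc_xmt_ls_rsp() once association teardown has completed.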
1903 */ 1904 spin_lock_irqsave(&tgtport->lock, flags); 1905 oldls = assoc->rcv_disconn; 1906 assoc->rcv_disconn = iod; 1907 spin_unlock_irqrestore(&tgtport->lock, flags); 1908 1909 if (oldls) { 1910 dev_info(tgtport->dev, 1911 "{%d:%d} Multiple Disconnect Association LS's " 1912 "received\n", 1913 tgtport->fc_target_port.port_num, assoc->a_id); 1914 /* overwrite good response with bogus failure */ 1915 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1916 sizeof(*iod->rspbuf), 1917 /* ok to use rqst, LS is same */ 1918 rqst->w0.ls_cmd, 1919 FCNVME_RJT_RC_UNAB, 1920 FCNVME_RJT_EXP_NONE, 0); 1921 nvmet_fc_xmt_ls_rsp(tgtport, oldls); 1922 } 1923 1924 nvmet_fc_schedule_delete_assoc(assoc); 1925 nvmet_fc_tgt_a_put(assoc); 1926 1927 return false; 1928 } 1929 1930 1931 /* *********************** NVME Ctrl Routines **************************** */ 1932 1933 1934 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); 1935 1936 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; 1937 1938 static void 1939 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1940 { 1941 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; 1942 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 1943 1944 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, 1945 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1946 nvmet_fc_free_ls_iod(tgtport, iod); 1947 nvmet_fc_tgtport_put(tgtport); 1948 } 1949 1950 static void 1951 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, 1952 struct nvmet_fc_ls_iod *iod) 1953 { 1954 int ret; 1955 1956 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, 1957 sizeof(*iod->rspbuf), DMA_TO_DEVICE); 1958 1959 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); 1960 if (ret) 1961 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); 1962 } 1963 1964 /* 1965 * Actual processing routine for received FC-NVME LS Requests from the LLD 1966 */ 1967 static void 1968 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, 1969 struct nvmet_fc_ls_iod *iod) 1970 { 1971 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; 1972 bool sendrsp = true; 1973 1974 iod->lsrsp->nvme_fc_private = iod; 1975 iod->lsrsp->rspbuf = iod->rspbuf; 1976 iod->lsrsp->rspdma = iod->rspdma; 1977 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; 1978 /* Be preventative. 
handlers will later set to valid length */ 1979 iod->lsrsp->rsplen = 0; 1980 1981 iod->assoc = NULL; 1982 1983 /* 1984 * handlers: 1985 * parse request input, execute the request, and format the 1986 * LS response 1987 */ 1988 switch (w0->ls_cmd) { 1989 case FCNVME_LS_CREATE_ASSOCIATION: 1990 /* Creates Association and initial Admin Queue/Connection */ 1991 nvmet_fc_ls_create_association(tgtport, iod); 1992 break; 1993 case FCNVME_LS_CREATE_CONNECTION: 1994 /* Creates an IO Queue/Connection */ 1995 nvmet_fc_ls_create_connection(tgtport, iod); 1996 break; 1997 case FCNVME_LS_DISCONNECT_ASSOC: 1998 /* Terminate a Queue/Connection or the Association */ 1999 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); 2000 break; 2001 default: 2002 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, 2003 sizeof(*iod->rspbuf), w0->ls_cmd, 2004 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 2005 } 2006 2007 if (sendrsp) 2008 nvmet_fc_xmt_ls_rsp(tgtport, iod); 2009 } 2010 2011 /* 2012 * Actual processing routine for received FC-NVME LS Requests from the LLD 2013 */ 2014 static void 2015 nvmet_fc_handle_ls_rqst_work(struct work_struct *work) 2016 { 2017 struct nvmet_fc_ls_iod *iod = 2018 container_of(work, struct nvmet_fc_ls_iod, work); 2019 struct nvmet_fc_tgtport *tgtport = iod->tgtport; 2020 2021 nvmet_fc_handle_ls_rqst(tgtport, iod); 2022 } 2023 2024 2025 /** 2026 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD 2027 * upon the reception of a NVME LS request. 2028 * 2029 * The nvmet-fc layer will copy payload to an internal structure for 2030 * processing. As such, upon completion of the routine, the LLDD may 2031 * immediately free/reuse the LS request buffer passed in the call. 2032 * 2033 * If this routine returns error, the LLDD should abort the exchange. 2034 * 2035 * @target_port: pointer to the (registered) target port the LS was 2036 * received on. 2037 * @hosthandle: pointer to the host specific data, gets stored in iod. 2038 * @lsrsp: pointer to a lsrsp structure to be used to reference 2039 * the exchange corresponding to the LS. 2040 * @lsreqbuf: pointer to the buffer containing the LS Request 2041 * @lsreqbuf_len: length, in bytes, of the received LS request 2042 */ 2043 int 2044 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, 2045 void *hosthandle, 2046 struct nvmefc_ls_rsp *lsrsp, 2047 void *lsreqbuf, u32 lsreqbuf_len) 2048 { 2049 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); 2050 struct nvmet_fc_ls_iod *iod; 2051 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 2052 2053 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 2054 dev_info(tgtport->dev, 2055 "RCV %s LS failed: payload too large (%d)\n", 2056 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2057 nvmefc_ls_names[w0->ls_cmd] : "", 2058 lsreqbuf_len); 2059 return -E2BIG; 2060 } 2061 2062 if (!nvmet_fc_tgtport_get(tgtport)) { 2063 dev_info(tgtport->dev, 2064 "RCV %s LS failed: target deleting\n", 2065 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 2066 nvmefc_ls_names[w0->ls_cmd] : ""); 2067 return -ESHUTDOWN; 2068 } 2069 2070 iod = nvmet_fc_alloc_ls_iod(tgtport); 2071 if (!iod) { 2072 dev_info(tgtport->dev, 2073 "RCV %s LS failed: context allocation failed\n", 2074 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 
2075 nvmefc_ls_names[w0->ls_cmd] : ""); 2076 nvmet_fc_tgtport_put(tgtport); 2077 return -ENOENT; 2078 } 2079 2080 iod->lsrsp = lsrsp; 2081 iod->fcpreq = NULL; 2082 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); 2083 iod->rqstdatalen = lsreqbuf_len; 2084 iod->hosthandle = hosthandle; 2085 2086 queue_work(nvmet_wq, &iod->work); 2087 2088 return 0; 2089 } 2090 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); 2091 2092 2093 /* 2094 * ********************** 2095 * Start of FCP handling 2096 * ********************** 2097 */ 2098 2099 static int 2100 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2101 { 2102 struct scatterlist *sg; 2103 unsigned int nent; 2104 2105 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); 2106 if (!sg) 2107 goto out; 2108 2109 fod->data_sg = sg; 2110 fod->data_sg_cnt = nent; 2111 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, 2112 ((fod->io_dir == NVMET_FCP_WRITE) ? 2113 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2114 /* note: write from initiator perspective */ 2115 fod->next_sg = fod->data_sg; 2116 2117 return 0; 2118 2119 out: 2120 return NVME_SC_INTERNAL; 2121 } 2122 2123 static void 2124 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) 2125 { 2126 if (!fod->data_sg || !fod->data_sg_cnt) 2127 return; 2128 2129 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, 2130 ((fod->io_dir == NVMET_FCP_WRITE) ? 2131 DMA_FROM_DEVICE : DMA_TO_DEVICE)); 2132 sgl_free(fod->data_sg); 2133 fod->data_sg = NULL; 2134 fod->data_sg_cnt = 0; 2135 } 2136 2137 2138 static bool 2139 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) 2140 { 2141 u32 sqtail, used; 2142 2143 /* egad, this is ugly. And sqtail is just a best guess */ 2144 sqtail = atomic_read(&q->sqtail) % q->sqsize; 2145 2146 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); 2147 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); 2148 } 2149 2150 /* 2151 * Prep RSP payload. 2152 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op 2153 */ 2154 static void 2155 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2156 struct nvmet_fc_fcp_iod *fod) 2157 { 2158 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; 2159 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2160 struct nvme_completion *cqe = &ersp->cqe; 2161 u32 *cqewd = (u32 *)cqe; 2162 bool send_ersp = false; 2163 u32 rsn, rspcnt, xfr_length; 2164 2165 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) 2166 xfr_length = fod->req.transfer_len; 2167 else 2168 xfr_length = fod->offset; 2169 2170 /* 2171 * check to see if we can send a 0's rsp. 2172 * Note: to send a 0's response, the NVME-FC host transport will 2173 * recreate the CQE. The host transport knows: sq id, SQHD (last 2174 * seen in an ersp), and command_id. Thus it will create a 2175 * zero-filled CQE with those known fields filled in. Transport 2176 * must send an ersp for any condition where the cqe won't match 2177 * this. 2178 * 2179 * Here are the FC-NVME mandated cases where we must send an ersp: 2180 * every N responses, where N=ersp_ratio 2181 * force fabric commands to send ersp's (not in FC-NVME but good 2182 * practice) 2183 * normal cmds: any time status is non-zero, or status is zero 2184 * but words 0 or 1 are non-zero. 
2185 * the SQ is 90% or more full 2186 * the cmd is a fused command 2187 * transferred data length not equal to cmd iu length 2188 */ 2189 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); 2190 if (!(rspcnt % fod->queue->ersp_ratio) || 2191 nvme_is_fabrics((struct nvme_command *) sqe) || 2192 xfr_length != fod->req.transfer_len || 2193 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || 2194 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || 2195 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) 2196 send_ersp = true; 2197 2198 /* re-set the fields */ 2199 fod->fcpreq->rspaddr = ersp; 2200 fod->fcpreq->rspdma = fod->rspdma; 2201 2202 if (!send_ersp) { 2203 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); 2204 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; 2205 } else { 2206 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); 2207 rsn = atomic_inc_return(&fod->queue->rsn); 2208 ersp->rsn = cpu_to_be32(rsn); 2209 ersp->xfrd_len = cpu_to_be32(xfr_length); 2210 fod->fcpreq->rsplen = sizeof(*ersp); 2211 } 2212 2213 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, 2214 sizeof(fod->rspiubuf), DMA_TO_DEVICE); 2215 } 2216 2217 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); 2218 2219 static void 2220 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, 2221 struct nvmet_fc_fcp_iod *fod) 2222 { 2223 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2224 2225 /* data no longer needed */ 2226 nvmet_fc_free_tgt_pgs(fod); 2227 2228 /* 2229 * if an ABTS was received or we issued the fcp_abort early 2230 * don't call abort routine again. 2231 */ 2232 /* no need to take lock - lock was taken earlier to get here */ 2233 if (!fod->aborted) 2234 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); 2235 2236 nvmet_fc_free_fcp_iod(fod->queue, fod); 2237 } 2238 2239 static void 2240 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, 2241 struct nvmet_fc_fcp_iod *fod) 2242 { 2243 int ret; 2244 2245 fod->fcpreq->op = NVMET_FCOP_RSP; 2246 fod->fcpreq->timeout = 0; 2247 2248 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2249 2250 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2251 if (ret) 2252 nvmet_fc_abort_op(tgtport, fod); 2253 } 2254 2255 static void 2256 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, 2257 struct nvmet_fc_fcp_iod *fod, u8 op) 2258 { 2259 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2260 struct scatterlist *sg = fod->next_sg; 2261 unsigned long flags; 2262 u32 remaininglen = fod->req.transfer_len - fod->offset; 2263 u32 tlen = 0; 2264 int ret; 2265 2266 fcpreq->op = op; 2267 fcpreq->offset = fod->offset; 2268 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; 2269 2270 /* 2271 * for next sequence: 2272 * break at a sg element boundary 2273 * attempt to keep sequence length capped at 2274 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to 2275 * be longer if a single sg element is larger 2276 * than that amount. This is done to avoid creating 2277 * a new sg list to use for the tgtport api. 
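	 *
	 * Hypothetical example: with 64KB sg elements and the 256KB
	 * NVMET_FC_MAX_SEQ_LENGTH cap, the loop below takes three full
	 * elements (192KB) per sequence; a single element larger than the
	 * cap is still carried in one sequence rather than being split
	 * mid-element.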
2278 */ 2279 fcpreq->sg = sg; 2280 fcpreq->sg_cnt = 0; 2281 while (tlen < remaininglen && 2282 fcpreq->sg_cnt < tgtport->max_sg_cnt && 2283 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { 2284 fcpreq->sg_cnt++; 2285 tlen += sg_dma_len(sg); 2286 sg = sg_next(sg); 2287 } 2288 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { 2289 fcpreq->sg_cnt++; 2290 tlen += min_t(u32, sg_dma_len(sg), remaininglen); 2291 sg = sg_next(sg); 2292 } 2293 if (tlen < remaininglen) 2294 fod->next_sg = sg; 2295 else 2296 fod->next_sg = NULL; 2297 2298 fcpreq->transfer_length = tlen; 2299 fcpreq->transferred_length = 0; 2300 fcpreq->fcp_error = 0; 2301 fcpreq->rsplen = 0; 2302 2303 /* 2304 * If the last READDATA request: check if LLDD supports 2305 * combined xfr with response. 2306 */ 2307 if ((op == NVMET_FCOP_READDATA) && 2308 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && 2309 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { 2310 fcpreq->op = NVMET_FCOP_READDATA_RSP; 2311 nvmet_fc_prep_fcp_rsp(tgtport, fod); 2312 } 2313 2314 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2315 if (ret) { 2316 /* 2317 * should be ok to set w/o lock as it's in the thread of 2318 * execution (not an async timer routine) and doesn't 2319 * contend with any clearing action 2320 */ 2321 fod->abort = true; 2322 2323 if (op == NVMET_FCOP_WRITEDATA) { 2324 spin_lock_irqsave(&fod->flock, flags); 2325 fod->writedataactive = false; 2326 spin_unlock_irqrestore(&fod->flock, flags); 2327 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2328 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 2329 fcpreq->fcp_error = ret; 2330 fcpreq->transferred_length = 0; 2331 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); 2332 } 2333 } 2334 } 2335 2336 static inline bool 2337 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) 2338 { 2339 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2340 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2341 2342 /* if in the middle of an io and we need to tear down */ 2343 if (abort) { 2344 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 2345 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2346 return true; 2347 } 2348 2349 nvmet_fc_abort_op(tgtport, fod); 2350 return true; 2351 } 2352 2353 return false; 2354 } 2355 2356 /* 2357 * actual done handler for FCP operations when completed by the lldd 2358 */ 2359 static void 2360 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) 2361 { 2362 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 2363 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2364 unsigned long flags; 2365 bool abort; 2366 2367 spin_lock_irqsave(&fod->flock, flags); 2368 abort = fod->abort; 2369 fod->writedataactive = false; 2370 spin_unlock_irqrestore(&fod->flock, flags); 2371 2372 switch (fcpreq->op) { 2373 2374 case NVMET_FCOP_WRITEDATA: 2375 if (__nvmet_fc_fod_op_abort(fod, abort)) 2376 return; 2377 if (fcpreq->fcp_error || 2378 fcpreq->transferred_length != fcpreq->transfer_length) { 2379 spin_lock_irqsave(&fod->flock, flags); 2380 fod->abort = true; 2381 spin_unlock_irqrestore(&fod->flock, flags); 2382 2383 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); 2384 return; 2385 } 2386 2387 fod->offset += fcpreq->transferred_length; 2388 if (fod->offset != fod->req.transfer_len) { 2389 spin_lock_irqsave(&fod->flock, flags); 2390 fod->writedataactive = true; 2391 spin_unlock_irqrestore(&fod->flock, flags); 2392 2393 /* transfer the next chunk */ 2394 nvmet_fc_transfer_fcp_data(tgtport, fod, 2395 NVMET_FCOP_WRITEDATA); 2396 return; 2397 
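			/*
			 * Note: the chunk issued above completes back into
			 * nvmet_fc_fod_op_done(), so this path repeats until
			 * fod->offset reaches fod->req.transfer_len and the
			 * command is handed to the nvmet layer below.
			 */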
} 2398 2399 /* data transfer complete, resume with nvmet layer */ 2400 fod->req.execute(&fod->req); 2401 break; 2402 2403 case NVMET_FCOP_READDATA: 2404 case NVMET_FCOP_READDATA_RSP: 2405 if (__nvmet_fc_fod_op_abort(fod, abort)) 2406 return; 2407 if (fcpreq->fcp_error || 2408 fcpreq->transferred_length != fcpreq->transfer_length) { 2409 nvmet_fc_abort_op(tgtport, fod); 2410 return; 2411 } 2412 2413 /* success */ 2414 2415 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { 2416 /* data no longer needed */ 2417 nvmet_fc_free_tgt_pgs(fod); 2418 nvmet_fc_free_fcp_iod(fod->queue, fod); 2419 return; 2420 } 2421 2422 fod->offset += fcpreq->transferred_length; 2423 if (fod->offset != fod->req.transfer_len) { 2424 /* transfer the next chunk */ 2425 nvmet_fc_transfer_fcp_data(tgtport, fod, 2426 NVMET_FCOP_READDATA); 2427 return; 2428 } 2429 2430 /* data transfer complete, send response */ 2431 2432 /* data no longer needed */ 2433 nvmet_fc_free_tgt_pgs(fod); 2434 2435 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2436 2437 break; 2438 2439 case NVMET_FCOP_RSP: 2440 if (__nvmet_fc_fod_op_abort(fod, abort)) 2441 return; 2442 nvmet_fc_free_fcp_iod(fod->queue, fod); 2443 break; 2444 2445 default: 2446 break; 2447 } 2448 } 2449 2450 static void 2451 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) 2452 { 2453 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2454 2455 nvmet_fc_fod_op_done(fod); 2456 } 2457 2458 /* 2459 * actual completion handler after execution by the nvmet layer 2460 */ 2461 static void 2462 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, 2463 struct nvmet_fc_fcp_iod *fod, int status) 2464 { 2465 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; 2466 struct nvme_completion *cqe = &fod->rspiubuf.cqe; 2467 unsigned long flags; 2468 bool abort; 2469 2470 spin_lock_irqsave(&fod->flock, flags); 2471 abort = fod->abort; 2472 spin_unlock_irqrestore(&fod->flock, flags); 2473 2474 /* if we have a CQE, snoop the last sq_head value */ 2475 if (!status) 2476 fod->queue->sqhd = cqe->sq_head; 2477 2478 if (abort) { 2479 nvmet_fc_abort_op(tgtport, fod); 2480 return; 2481 } 2482 2483 /* if an error handling the cmd post initial parsing */ 2484 if (status) { 2485 /* fudge up a failed CQE status for our transport error */ 2486 memset(cqe, 0, sizeof(*cqe)); 2487 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ 2488 cqe->sq_id = cpu_to_le16(fod->queue->qid); 2489 cqe->command_id = sqe->command_id; 2490 cqe->status = cpu_to_le16(status); 2491 } else { 2492 2493 /* 2494 * try to push the data even if the SQE status is non-zero. 
2495 * There may be a status where data still was intended to 2496 * be moved 2497 */ 2498 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { 2499 /* push the data over before sending rsp */ 2500 nvmet_fc_transfer_fcp_data(tgtport, fod, 2501 NVMET_FCOP_READDATA); 2502 return; 2503 } 2504 2505 /* writes & no data - fall thru */ 2506 } 2507 2508 /* data no longer needed */ 2509 nvmet_fc_free_tgt_pgs(fod); 2510 2511 nvmet_fc_xmt_fcp_rsp(tgtport, fod); 2512 } 2513 2514 2515 static void 2516 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) 2517 { 2518 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); 2519 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 2520 2521 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); 2522 } 2523 2524 2525 /* 2526 * Actual processing routine for received FC-NVME I/O Requests from the LLD 2527 */ 2528 static void 2529 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 2530 struct nvmet_fc_fcp_iod *fod) 2531 { 2532 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; 2533 u32 xfrlen = be32_to_cpu(cmdiu->data_len); 2534 int ret; 2535 2536 /* 2537 * Fused commands are currently not supported in the linux 2538 * implementation. 2539 * 2540 * As such, the implementation of the FC transport does not 2541 * look at the fused commands and order delivery to the upper 2542 * layer until we have both based on csn. 2543 */ 2544 2545 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; 2546 2547 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { 2548 fod->io_dir = NVMET_FCP_WRITE; 2549 if (!nvme_is_write(&cmdiu->sqe)) 2550 goto transport_error; 2551 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { 2552 fod->io_dir = NVMET_FCP_READ; 2553 if (nvme_is_write(&cmdiu->sqe)) 2554 goto transport_error; 2555 } else { 2556 fod->io_dir = NVMET_FCP_NODATA; 2557 if (xfrlen) 2558 goto transport_error; 2559 } 2560 2561 fod->req.cmd = &fod->cmdiubuf.sqe; 2562 fod->req.cqe = &fod->rspiubuf.cqe; 2563 if (!tgtport->pe) 2564 goto transport_error; 2565 fod->req.port = tgtport->pe->port; 2566 2567 /* clear any response payload */ 2568 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); 2569 2570 fod->data_sg = NULL; 2571 fod->data_sg_cnt = 0; 2572 2573 ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, 2574 &nvmet_fc_tgt_fcp_ops); 2575 if (!ret) { 2576 /* bad SQE content or invalid ctrl state */ 2577 /* nvmet layer has already called op done to send rsp. */ 2578 return; 2579 } 2580 2581 fod->req.transfer_len = xfrlen; 2582 2583 /* keep a running counter of tail position */ 2584 atomic_inc(&fod->queue->sqtail); 2585 2586 if (fod->req.transfer_len) { 2587 ret = nvmet_fc_alloc_tgt_pgs(fod); 2588 if (ret) { 2589 nvmet_req_complete(&fod->req, ret); 2590 return; 2591 } 2592 } 2593 fod->req.sg = fod->data_sg; 2594 fod->req.sg_cnt = fod->data_sg_cnt; 2595 fod->offset = 0; 2596 2597 if (fod->io_dir == NVMET_FCP_WRITE) { 2598 /* pull the data over before invoking nvmet layer */ 2599 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); 2600 return; 2601 } 2602 2603 /* 2604 * Reads or no data: 2605 * 2606 * can invoke the nvmet_layer now. If read data, cmd completion will 2607 * push the data 2608 */ 2609 fod->req.execute(&fod->req); 2610 return; 2611 2612 transport_error: 2613 nvmet_fc_abort_op(tgtport, fod); 2614 } 2615 2616 /** 2617 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD 2618 * upon the reception of a NVME FCP CMD IU. 2619 * 2620 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2621 * layer for processing. 
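 *
 * A minimal usage sketch from an LLDD receive path; "lldd", "ctx" and
 * lldd_abort_exchange() below are hypothetical stand-ins, not part of
 * this API:
 *
 *	ret = nvmet_fc_rcv_fcp_req(lldd->targetport, &ctx->tgt_fcp_req,
 *				rcv_buf, rcv_len);
 *	if (ret && ret != -EOVERFLOW)
 *		lldd_abort_exchange(ctx);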
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, due to the packetized nature of FC and the api of the FC
 * LLDD - which may issue a hw command to send the response but not get
 * the hw completion (and upcall the nvmet_fc layer) before a new
 * command is asynchronously received - it's possible for a command to
 * arrive before the LLDD and nvmet_fc have recycled the prior job
 * structure. This gives the appearance of more commands received than
 * fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD IU buffer information are remembered, and
 * the routine returns a -EOVERFLOW status. Subsequently, when a queue
 * job structure is freed, it is immediately reallocated for anything on
 * the pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *		was received on.
 * @fcpreq:	pointer to a fcpreq request structure to be used to reference
 *		the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:	pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
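	 * If no fod is available and the request is deferred below, the
	 * queue lookup reference instead remains held while the command
	 * sits on pending_cmd_list.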
2692 */ 2693 2694 spin_lock_irqsave(&queue->qlock, flags); 2695 2696 fod = nvmet_fc_alloc_fcp_iod(queue); 2697 if (fod) { 2698 spin_unlock_irqrestore(&queue->qlock, flags); 2699 2700 fcpreq->nvmet_fc_private = fod; 2701 fod->fcpreq = fcpreq; 2702 2703 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2704 2705 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2706 2707 return 0; 2708 } 2709 2710 if (!tgtport->ops->defer_rcv) { 2711 spin_unlock_irqrestore(&queue->qlock, flags); 2712 /* release the queue lookup reference */ 2713 nvmet_fc_tgt_q_put(queue); 2714 return -ENOENT; 2715 } 2716 2717 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2718 struct nvmet_fc_defer_fcp_req, req_list); 2719 if (deferfcp) { 2720 /* Just re-use one that was previously allocated */ 2721 list_del(&deferfcp->req_list); 2722 } else { 2723 spin_unlock_irqrestore(&queue->qlock, flags); 2724 2725 /* Now we need to dynamically allocate one */ 2726 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2727 if (!deferfcp) { 2728 /* release the queue lookup reference */ 2729 nvmet_fc_tgt_q_put(queue); 2730 return -ENOMEM; 2731 } 2732 spin_lock_irqsave(&queue->qlock, flags); 2733 } 2734 2735 /* For now, use rspaddr / rsplen to save payload information */ 2736 fcpreq->rspaddr = cmdiubuf; 2737 fcpreq->rsplen = cmdiubuf_len; 2738 deferfcp->fcp_req = fcpreq; 2739 2740 /* defer processing till a fod becomes available */ 2741 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2742 2743 /* NOTE: the queue lookup reference is still valid */ 2744 2745 spin_unlock_irqrestore(&queue->qlock, flags); 2746 2747 return -EOVERFLOW; 2748 } 2749 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2750 2751 /** 2752 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD 2753 * upon the reception of an ABTS for a FCP command 2754 * 2755 * Notify the transport that an ABTS has been received for a FCP command 2756 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The 2757 * LLDD believes the command is still being worked on 2758 * (template_ops->fcp_req_release() has not been called). 2759 * 2760 * The transport will wait for any outstanding work (an op to the LLDD, 2761 * which the lldd should complete with error due to the ABTS; or the 2762 * completion from the nvmet layer of the nvme command), then will 2763 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to 2764 * return the i/o context to the LLDD. The LLDD may send the BA_ACC 2765 * to the ABTS either after return from this function (assuming any 2766 * outstanding op work has been terminated) or upon the callback being 2767 * called. 2768 * 2769 * @target_port: pointer to the (registered) target port the FCP CMD IU 2770 * was received on. 2771 * @fcpreq: pointer to the fcpreq request structure that corresponds 2772 * to the exchange that received the ABTS. 2773 */ 2774 void 2775 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, 2776 struct nvmefc_tgt_fcp_req *fcpreq) 2777 { 2778 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 2779 struct nvmet_fc_tgt_queue *queue; 2780 unsigned long flags; 2781 2782 if (!fod || fod->fcpreq != fcpreq) 2783 /* job appears to have already completed, ignore abort */ 2784 return; 2785 2786 queue = fod->queue; 2787 2788 spin_lock_irqsave(&queue->qlock, flags); 2789 if (fod->active) { 2790 /* 2791 * mark as abort. The abort handler, invoked upon completion 2792 * of any work, will detect the aborted status and do the 2793 * callback. 
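		 * Both flags are set here: "abort" tells the transport to
		 * stop further work on the io, while "aborted" records that
		 * an ABTS was seen so nvmet_fc_abort_op() will not call the
		 * LLDD's fcp_abort() for this exchange again.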
2794 */ 2795 spin_lock(&fod->flock); 2796 fod->abort = true; 2797 fod->aborted = true; 2798 spin_unlock(&fod->flock); 2799 } 2800 spin_unlock_irqrestore(&queue->qlock, flags); 2801 } 2802 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2803 2804 2805 struct nvmet_fc_traddr { 2806 u64 nn; 2807 u64 pn; 2808 }; 2809 2810 static int 2811 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 2812 { 2813 u64 token64; 2814 2815 if (match_u64(sstr, &token64)) 2816 return -EINVAL; 2817 *val = token64; 2818 2819 return 0; 2820 } 2821 2822 /* 2823 * This routine validates and extracts the WWN's from the TRADDR string. 2824 * As kernel parsers need the 0x to determine number base, universally 2825 * build string to parse with 0x prefix before parsing name strings. 2826 */ 2827 static int 2828 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 2829 { 2830 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 2831 substring_t wwn = { name, &name[sizeof(name)-1] }; 2832 int nnoffset, pnoffset; 2833 2834 /* validate if string is one of the 2 allowed formats */ 2835 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 2836 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 2837 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 2838 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 2839 nnoffset = NVME_FC_TRADDR_OXNNLEN; 2840 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 2841 NVME_FC_TRADDR_OXNNLEN; 2842 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 2843 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 2844 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 2845 "pn-", NVME_FC_TRADDR_NNLEN))) { 2846 nnoffset = NVME_FC_TRADDR_NNLEN; 2847 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 2848 } else 2849 goto out_einval; 2850 2851 name[0] = '0'; 2852 name[1] = 'x'; 2853 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 2854 2855 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2856 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 2857 goto out_einval; 2858 2859 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 2860 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 2861 goto out_einval; 2862 2863 return 0; 2864 2865 out_einval: 2866 pr_warn("%s: bad traddr string\n", __func__); 2867 return -EINVAL; 2868 } 2869 2870 static int 2871 nvmet_fc_add_port(struct nvmet_port *port) 2872 { 2873 struct nvmet_fc_tgtport *tgtport; 2874 struct nvmet_fc_port_entry *pe; 2875 struct nvmet_fc_traddr traddr = { 0L, 0L }; 2876 unsigned long flags; 2877 int ret; 2878 2879 /* validate the address info */ 2880 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || 2881 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) 2882 return -EINVAL; 2883 2884 /* map the traddr address info to a target port */ 2885 2886 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, 2887 sizeof(port->disc_addr.traddr)); 2888 if (ret) 2889 return ret; 2890 2891 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2892 if (!pe) 2893 return -ENOMEM; 2894 2895 ret = -ENXIO; 2896 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2897 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { 2898 if ((tgtport->fc_target_port.node_name == traddr.nn) && 2899 (tgtport->fc_target_port.port_name == traddr.pn)) { 2900 if (!nvmet_fc_tgtport_get(tgtport)) 2901 continue; 2902 2903 /* a FC port can only be 1 nvmet port id */ 2904 if (!tgtport->pe) { 2905 nvmet_fc_portentry_bind(tgtport, pe, port); 2906 ret = 0; 2907 } else 2908 ret = -EALREADY; 2909 2910 nvmet_fc_tgtport_put(tgtport); 2911 break; 2912 } 2913 } 2914 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2915 2916 
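	/*
	 * ret is still -ENXIO if no matching targetport was found, or
	 * -EALREADY if that port was already bound; in either case the
	 * unused port entry is released below.
	 */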
if (ret) 2917 kfree(pe); 2918 2919 return ret; 2920 } 2921 2922 static void 2923 nvmet_fc_remove_port(struct nvmet_port *port) 2924 { 2925 struct nvmet_fc_port_entry *pe = port->priv; 2926 struct nvmet_fc_tgtport *tgtport = NULL; 2927 unsigned long flags; 2928 2929 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2930 if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) 2931 tgtport = pe->tgtport; 2932 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2933 2934 nvmet_fc_portentry_unbind(pe); 2935 2936 if (tgtport) { 2937 /* terminate any outstanding associations */ 2938 __nvmet_fc_free_assocs(tgtport); 2939 nvmet_fc_tgtport_put(tgtport); 2940 } 2941 2942 kfree(pe); 2943 } 2944 2945 static void 2946 nvmet_fc_discovery_chg(struct nvmet_port *port) 2947 { 2948 struct nvmet_fc_port_entry *pe = port->priv; 2949 struct nvmet_fc_tgtport *tgtport = NULL; 2950 unsigned long flags; 2951 2952 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2953 if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) 2954 tgtport = pe->tgtport; 2955 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2956 2957 if (!tgtport) 2958 return; 2959 2960 if (tgtport && tgtport->ops->discovery_event) 2961 tgtport->ops->discovery_event(&tgtport->fc_target_port); 2962 2963 nvmet_fc_tgtport_put(tgtport); 2964 } 2965 2966 static ssize_t 2967 nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, 2968 char *traddr, size_t traddr_size) 2969 { 2970 struct nvmet_sq *sq = ctrl->sqs[0]; 2971 struct nvmet_fc_tgt_queue *queue = 2972 container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); 2973 struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; 2974 struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; 2975 u64 wwnn, wwpn; 2976 ssize_t ret = 0; 2977 2978 if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) 2979 return -ENODEV; 2980 if (!hostport || !nvmet_fc_hostport_get(hostport)) { 2981 ret = -ENODEV; 2982 goto out_put; 2983 } 2984 2985 if (tgtport->ops->host_traddr) { 2986 ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); 2987 if (ret) 2988 goto out_put_host; 2989 ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); 2990 } 2991 out_put_host: 2992 nvmet_fc_hostport_put(hostport); 2993 out_put: 2994 nvmet_fc_tgtport_put(tgtport); 2995 return ret; 2996 } 2997 2998 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2999 .owner = THIS_MODULE, 3000 .type = NVMF_TRTYPE_FC, 3001 .msdbd = 1, 3002 .add_port = nvmet_fc_add_port, 3003 .remove_port = nvmet_fc_remove_port, 3004 .queue_response = nvmet_fc_fcp_nvme_cmd_done, 3005 .delete_ctrl = nvmet_fc_delete_ctrl, 3006 .discovery_chg = nvmet_fc_discovery_chg, 3007 .host_traddr = nvmet_fc_host_traddr, 3008 }; 3009 3010 static int __init nvmet_fc_init_module(void) 3011 { 3012 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); 3013 } 3014 3015 static void __exit nvmet_fc_exit_module(void) 3016 { 3017 /* ensure any shutdown operation, e.g. delete ctrls have finished */ 3018 flush_workqueue(nvmet_wq); 3019 3020 /* sanity check - all lports should be removed */ 3021 if (!list_empty(&nvmet_fc_target_list)) 3022 pr_warn("%s: targetport list not empty\n", __func__); 3023 3024 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); 3025 3026 ida_destroy(&nvmet_fc_tgtport_cnt); 3027 } 3028 3029 module_init(nvmet_fc_init_module); 3030 module_exit(nvmet_fc_exit_module); 3031 3032 MODULE_DESCRIPTION("NVMe target FC transport driver"); 3033 MODULE_LICENSE("GPL v2"); 3034