1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/blk-mq.h>
9 #include <linux/parser.h>
10 #include <linux/random.h>
11 #include <uapi/scsi/fc/fc_fs.h>
12 #include <uapi/scsi/fc/fc_els.h>
13
14 #include "nvmet.h"
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include "../host/fc.h"
18
19
20 /* *************************** Data Structures/Defines ****************** */
21
22
23 #define NVMET_LS_CTX_COUNT 256
24
25 struct nvmet_fc_tgtport;
26 struct nvmet_fc_tgt_assoc;
27
/* context for one received LS request (LS RQST RCV) */
struct nvmet_fc_ls_iod {
	struct nvmefc_ls_rsp		*lsrsp;		/* LLDD response handle */
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list;	/* tgtport->ls_rcv_list (free) or ls_busylist */

	struct nvmet_fc_tgtport		*tgtport;	/* owning target port */
	struct nvmet_fc_tgt_assoc	*assoc;		/* association the LS resolved to, if any */
	void				*hosthandle;	/* LLDD-provided host identity token */

	union nvmefc_ls_requests	*rqstbuf;	/* received LS payload */
	union nvmefc_ls_responses	*rspbuf;	/* response payload; allocated directly after rqstbuf */
	u16				rqstdatalen;	/* valid bytes in rqstbuf */
	dma_addr_t			rspdma;		/* DMA mapping of rspbuf (DMA_TO_DEVICE) */

	struct scatterlist		sg[2];

	struct work_struct		work;		/* runs nvmet_fc_handle_ls_rqst_work */
} __aligned(sizeof(unsigned long long));
47
/* context for one LS request transmitted by the target (LS RQST XMT) */
struct nvmet_fc_ls_req_op {
	struct nvmefc_ls_req	ls_req;		/* LLDD-visible request */

	struct nvmet_fc_tgtport	*tgtport;	/* a tgtport ref is held while the op lives */
	void			*hosthandle;	/* LLDD host identity the LS is sent to */

	int			ls_error;	/* error recorded when the send fails */
	struct list_head	lsreq_list;	/* tgtport->ls_req_list */
	bool			req_queued;	/* true while linked on ls_req_list w/ DMA mapped */

	struct work_struct	put_work;	/* deferred free + tgtport put (nvmet_fc_put_lsop_work) */
};
60
61
62 /* desired maximum for a single sequence - if sg list allows it */
63 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
64
/* data-transfer disposition of an FCP operation */
enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,	/* command has no data phase */
	NVMET_FCP_WRITE,	/* command with write data */
	NVMET_FCP_READ,		/* command with read data */
	NVMET_FCP_ABORTED,	/* operation was aborted */
};
71
/* per-IO context for one FCP command on a target queue */
struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* LLDD request; NULL while the iod is free */

	struct nvme_fc_cmd_iu		cmdiubuf;	/* saved copy of the received NVMe CMD IU */
	struct nvme_fc_ersp_iu		rspiubuf;	/* response IU, DMA-mapped at rspdma */
	dma_addr_t			rspdma;		/* 0 when the rsp buffer is not mapped */
	struct scatterlist		*next_sg;	/* resume point in data_sg — TODO confirm */
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;		/* iod in use (removed from fod_list) */
	bool				abort;		/* abort requested for this IO */
	bool				aborted;	/* LLDD fcp_abort was issued */
	bool				writedataactive;	/* waiting on write data from the host */
	spinlock_t			flock;		/* guards the abort/active flags above */

	struct nvmet_req		req;		/* core nvmet request */
	struct work_struct		defer_work;	/* runs nvmet_fc_fcp_rqst_op_defer_work */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};
97
/* target-side state for one FC-NVME target port */
struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;	/* public part handed to the LLDD */

	struct list_head		tgt_list;	/* nvmet_fc_target_list */
	struct device			*dev;		/* dev for dma mapping; may be NULL (fcloop) */
	struct nvmet_fc_target_template	*ops;		/* LLDD callback table */

	struct nvmet_fc_ls_iod		*iod;		/* pool of NVMET_LS_CTX_COUNT LS contexts */
	spinlock_t			lock;		/* guards the lists below */
	struct list_head		ls_rcv_list;	/* free LS receive contexts */
	struct list_head		ls_req_list;	/* outstanding transmitted LS requests */
	struct list_head		ls_busylist;	/* LS receive contexts in use */
	struct list_head		assoc_list;	/* associations; walked under RCU */
	struct list_head		host_list;	/* hostports known on this port */
	struct ida			assoc_cnt;	/* presumably allocates assoc a_id values — verify */
	struct nvmet_fc_port_entry	*pe;		/* bound nvmet port entry, if any */
	struct kref			ref;
	u32				max_sg_cnt;
};
117
/* binding between an nvmet_port and a tgtport, keyed by node/port names */
struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport	*tgtport;
	struct nvmet_port	*port;
	u64			node_name;
	u64			port_name;
	struct list_head	pe_list;	/* nvmet_fc_portentry_list */
};
125
/*
 * A received FCP command that arrived while no fcp_iod was free.
 * Sits on queue->pending_cmd_list until an iod completes; the
 * descriptor itself is then recycled via queue->avail_defer_list.
 */
struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};
130
/* one NVMe queue (admin qid 0 or an io queue) within an association */
struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;	/* xchg'd to 0 by delete_target_queue */
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;		/* response sequence number; starts at 1 */
	spinlock_t			qlock;		/* guards fod_list and the defer lists */
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;	/* free fcp_iods */
	struct list_head		pending_cmd_list;	/* deferred cmds waiting for an iod */
	struct list_head		avail_defer_list;	/* reusable defer descriptors */
	struct workqueue_struct		*work_q;
	struct kref			ref;
	/* array of fcp_iods */
	struct nvmet_fc_fcp_iod		fod[] /* __counted_by(sqsize) */;
} __aligned(sizeof(unsigned long long));
153
/* identity of a remote host on a tgtport, keyed by the LLDD hosthandle */
struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport	*tgtport;	/* holds a tgtport reference */
	void			*hosthandle;	/* LLDD-provided host token */
	struct list_head	host_list;	/* tgtport->host_list */
	struct kref		ref;
	u8			invalid;	/* nonzero once invalidated (set elsewhere — see LLDD path) */
};
161
/* one FC-NVME association on a tgtport */
struct nvmet_fc_tgt_assoc {
	u64				association_id;	/* random in upper 6 bytes, low 2 bytes zero */
	u32				a_id;
	atomic_t			terminating;	/* set once teardown has been scheduled */
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;	/* presumably a pending disconnect LS — verify */
	struct list_head		a_list;		/* tgtport->assoc_list (RCU-walked) */
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];	/* indexed by qid */
	struct kref			ref;
	struct work_struct		del_work;	/* runs nvmet_fc_delete_assoc_work */
};
174
175 /*
176 * Association and Connection IDs:
177 *
178 * Association ID will have random number in upper 6 bytes and zero
179 * in lower 2 bytes
180 *
181 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
182 *
183 * note: Association ID = Connection ID for queue 0
184 */
185 #define BYTES_FOR_QID sizeof(u16)
186 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
187 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
188
189 static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc * assoc,u16 qid)190 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
191 {
192 return (assoc->association_id | qid);
193 }
194
195 static inline u64
nvmet_fc_getassociationid(u64 connectionid)196 nvmet_fc_getassociationid(u64 connectionid)
197 {
198 return connectionid & ~NVMET_FC_QUEUEID_MASK;
199 }
200
201 static inline u16
nvmet_fc_getqueueid(u64 connectionid)202 nvmet_fc_getqueueid(u64 connectionid)
203 {
204 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
205 }
206
207 static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port * targetport)208 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
209 {
210 return container_of(targetport, struct nvmet_fc_tgtport,
211 fc_target_port);
212 }
213
214 static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req * nvme_req)215 nvmet_req_to_fod(struct nvmet_req *nvme_req)
216 {
217 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
218 }
219
220
221 /* *************************** Globals **************************** */
222
223
224 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
225
226 static LIST_HEAD(nvmet_fc_target_list);
227 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
228 static LIST_HEAD(nvmet_fc_portentry_list);
229
230
231 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
232 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
233 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
234 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
235 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
236 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
237 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
nvmet_fc_put_lsop_work(struct work_struct * work)238 static void nvmet_fc_put_lsop_work(struct work_struct *work)
239 {
240 struct nvmet_fc_ls_req_op *lsop =
241 container_of(work, struct nvmet_fc_ls_req_op, put_work);
242
243 nvmet_fc_tgtport_put(lsop->tgtport);
244 kfree(lsop);
245 }
246 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
247 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
248 struct nvmet_fc_fcp_iod *fod);
249 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
250 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
251 struct nvmet_fc_ls_iod *iod);
252
253
254 /* *********************** FC-NVME DMA Handling **************************** */
255
256 /*
257 * The fcloop device passes in a NULL device pointer. Real LLD's will
258 * pass in a valid device pointer. If NULL is passed to the dma mapping
259 * routines, depending on the platform, it may or may not succeed, and
260 * may crash.
261 *
262 * As such:
263 * Wrapper all the dma routines and check the dev pointer.
264 *
265 * If simple mappings (return just a dma address, we'll noop them,
266 * returning a dma address of 0.
267 *
268 * On more complex mappings (dma_map_sg), a pseudo routine fills
269 * in the scatter list, setting all dma addresses to 0.
270 */
271
272 static inline dma_addr_t
fc_dma_map_single(struct device * dev,void * ptr,size_t size,enum dma_data_direction dir)273 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
274 enum dma_data_direction dir)
275 {
276 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
277 }
278
279 static inline int
fc_dma_mapping_error(struct device * dev,dma_addr_t dma_addr)280 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
281 {
282 return dev ? dma_mapping_error(dev, dma_addr) : 0;
283 }
284
285 static inline void
fc_dma_unmap_single(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)286 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
287 enum dma_data_direction dir)
288 {
289 if (dev)
290 dma_unmap_single(dev, addr, size, dir);
291 }
292
293 static inline void
fc_dma_sync_single_for_cpu(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)294 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
295 enum dma_data_direction dir)
296 {
297 if (dev)
298 dma_sync_single_for_cpu(dev, addr, size, dir);
299 }
300
301 static inline void
fc_dma_sync_single_for_device(struct device * dev,dma_addr_t addr,size_t size,enum dma_data_direction dir)302 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
303 enum dma_data_direction dir)
304 {
305 if (dev)
306 dma_sync_single_for_device(dev, addr, size, dir);
307 }
308
309 /* pseudo dma_map_sg call */
310 static int
fc_map_sg(struct scatterlist * sg,int nents)311 fc_map_sg(struct scatterlist *sg, int nents)
312 {
313 struct scatterlist *s;
314 int i;
315
316 WARN_ON(nents == 0 || sg[0].length == 0);
317
318 for_each_sg(sg, s, nents, i) {
319 s->dma_address = 0L;
320 #ifdef CONFIG_NEED_SG_DMA_LENGTH
321 s->dma_length = s->length;
322 #endif
323 }
324 return nents;
325 }
326
327 static inline int
fc_dma_map_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)328 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
329 enum dma_data_direction dir)
330 {
331 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
332 }
333
334 static inline void
fc_dma_unmap_sg(struct device * dev,struct scatterlist * sg,int nents,enum dma_data_direction dir)335 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
336 enum dma_data_direction dir)
337 {
338 if (dev)
339 dma_unmap_sg(dev, sg, nents, dir);
340 }
341
342
343 /* ********************** FC-NVME LS XMT Handling ************************* */
344
345
/*
 * Finish a transmitted LS op: unlink it from tgtport->ls_req_list,
 * release its DMA mapping, and defer the final free + tgtport put to
 * process context (put_work).
 */
static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	/* already unlinked (e.g. by the send-failure path): nothing to undo */
	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		goto out_putwork;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* request and response were mapped as one bidirectional region */
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

out_putwork:
	queue_work(nvmet_wq, &lsop->put_work);
}
373
/*
 * Hand an LS request to the LLDD for transmission.
 *
 * Takes a tgtport reference for the lifetime of the op, maps the
 * request+response buffers as a single bidirectional DMA region and
 * links the op on tgtport->ls_req_list while it is outstanding.  On
 * failure the op is unlinked/unmapped and the reference is dropped.
 *
 * Returns 0 on success; -EOPNOTSUPP if the LLDD has no ls_req op,
 * -ESHUTDOWN if the tgtport is going away, -EFAULT on a DMA mapping
 * failure, or the LLDD's ls_req error.
 */
static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	/* older LLDDs may not support target-initiated LS transmission */
	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	/* the response buffer sits right behind the request in the mapping */
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
			    (lsreq->rqstlen + lsreq->rsplen),
			    DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}
432
/*
 * Transmit an LS without waiting on its completion; the 'done'
 * callback performs any cleanup when the LLDD completes the request.
 */
static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}
442
443 static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req * lsreq,int status)444 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
445 {
446 struct nvmet_fc_ls_req_op *lsop =
447 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
448
449 __nvmet_fc_finish_ls_req(lsop);
450
451 /* fc-nvme target doesn't care about success or failure of cmd */
452 }
453
454 /*
455 * This routine sends a FC-NVME LS to disconnect (aka terminate)
456 * the FC-NVME Association. Terminating the association also
457 * terminates the FC-NVME connections (per queue, both admin and io
458 * queues) that are part of the association. E.g. things are torn
459 * down, and the related FC-NVME Association ID and Connection IDs
460 * become invalid.
461 *
462 * The behavior of the fc-nvme target is such that its
463 * understanding of the association and connections will implicitly
464 * be torn down. The action is implicit as it may be due to a loss of
465 * connectivity with the fc-nvme host, so the target may never get a
466 * response even if it tried. As such, the action of this routine
467 * is to asynchronously send the LS, ignore any results of the LS, and
468 * continue on with terminating the association. If the fc-nvme host
469 * is present and receives the LS, it too can tear down.
470 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || assoc->hostport->invalid)
		return;

	/* one allocation: op + rqst buffer + acc buffer + LLDD private area */
	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	/* carve the trailing space into its pieces */
	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		/* send never started, so the done callback will not run */
		kfree(lsop);
	}
}
520
521
522 /* *********************** FC-NVME Port Management ************************ */
523
524
525 static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport * tgtport)526 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
527 {
528 struct nvmet_fc_ls_iod *iod;
529 int i;
530
531 iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT);
532 if (!iod)
533 return -ENOMEM;
534
535 tgtport->iod = iod;
536
537 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
538 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
539 iod->tgtport = tgtport;
540 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
541
542 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
543 sizeof(union nvmefc_ls_responses),
544 GFP_KERNEL);
545 if (!iod->rqstbuf)
546 goto out_fail;
547
548 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
549
550 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
551 sizeof(*iod->rspbuf),
552 DMA_TO_DEVICE);
553 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
554 goto out_fail;
555 }
556
557 return 0;
558
559 out_fail:
560 kfree(iod->rqstbuf);
561 list_del(&iod->ls_rcv_list);
562 for (iod--, i--; i >= 0; iod--, i--) {
563 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
564 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
565 kfree(iod->rqstbuf);
566 list_del(&iod->ls_rcv_list);
567 }
568
569 kfree(iod);
570
571 return -EFAULT;
572 }
573
574 static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport * tgtport)575 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
576 {
577 struct nvmet_fc_ls_iod *iod = tgtport->iod;
578 int i;
579
580 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
581 fc_dma_unmap_single(tgtport->dev,
582 iod->rspdma, sizeof(*iod->rspbuf),
583 DMA_TO_DEVICE);
584 kfree(iod->rqstbuf);
585 list_del(&iod->ls_rcv_list);
586 }
587 kfree(tgtport->iod);
588 }
589
/*
 * Take a free LS receive context off tgtport->ls_rcv_list and move it
 * onto ls_busylist.  Returns NULL if every context is in use.
 */
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}
604
605
/* return an LS receive context from ls_busylist to the free list */
static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
616
/*
 * Initialize a queue's fcp_iod array: link every iod on the free list
 * and DMA-map its response IU buffer.  If a mapping fails, all iods
 * prepared so far are unwound - unmapped, rspdma reset to 0 (so
 * nvmet_fc_destroy_fcp_iodlist() skips them) and unlinked - and the
 * function returns with an empty fod_list.
 */
static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			/* drop the element that failed, then unwind the rest */
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}
651
652 static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_tgt_queue * queue)653 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
654 struct nvmet_fc_tgt_queue *queue)
655 {
656 struct nvmet_fc_fcp_iod *fod = queue->fod;
657 int i;
658
659 for (i = 0; i < queue->sqsize; fod++, i++) {
660 if (fod->rspdma)
661 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
662 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
663 }
664 }
665
/*
 * Pull a free fcp_iod off the queue's free list and mark it active.
 * Must be called with queue->qlock held.  Returns NULL when the
 * free list is empty.
 */
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}
686
687
688 static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_tgt_queue * queue,struct nvmefc_tgt_fcp_req * fcpreq)689 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
690 struct nvmet_fc_tgt_queue *queue,
691 struct nvmefc_tgt_fcp_req *fcpreq)
692 {
693 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
694
695 /*
696 * put all admin cmds on hw queue id 0. All io commands go to
697 * the respective hw queue based on a modulo basis
698 */
699 fcpreq->hwqid = queue->qid ?
700 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
701
702 nvmet_fc_handle_fcp_rqst(tgtport, fod);
703 }
704
705 static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct * work)706 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
707 {
708 struct nvmet_fc_fcp_iod *fod =
709 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
710
711 /* Submit deferred IO for processing */
712 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
713
714 }
715
/*
 * Return a completed fcp_iod to its queue.  If a command was deferred
 * for lack of an iod, this iod is immediately reused for it: the saved
 * CMD IU is restored, the defer descriptor is recycled, the LLDD is
 * told processing has started (defer_rcv), and the IO is queued for
 * handling on the queue's workqueue.
 */
static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	/* reset the iod to its idle state */
	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		/* nothing pending: put the iod back on the free list */
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
780
/*
 * Allocate and initialize one target queue (qid 0 = admin) for an
 * association: iod array sized by sqsize, a dedicated workqueue, and
 * the backing nvmet cq/sq.  The queue is installed in
 * assoc->queues[qid].  Returns NULL on any failure.
 */
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	/* queue struct plus trailing fod[sqsize] array */
	queue = kzalloc_flex(*queue, fod, sqsize);
	if (!queue)
		return NULL;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_free_queue;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	nvmet_cq_init(&queue->nvme_cq);
	ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
	if (ret)
		goto out_fail_iodlist;

	/* a queue for this qid must not already exist */
	WARN_ON(assoc->queues[qid]);
	assoc->queues[qid] = queue;

	return queue;

out_fail_iodlist:
	nvmet_cq_put(&queue->nvme_cq);
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_free_queue:
	kfree(queue);
	return NULL;
}
834
835
836 static void
nvmet_fc_tgt_queue_free(struct kref * ref)837 nvmet_fc_tgt_queue_free(struct kref *ref)
838 {
839 struct nvmet_fc_tgt_queue *queue =
840 container_of(ref, struct nvmet_fc_tgt_queue, ref);
841
842 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
843
844 destroy_workqueue(queue->work_q);
845
846 kfree(queue);
847 }
848
/* drop a queue reference; the last put runs nvmet_fc_tgt_queue_free() */
static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}
854
/* take a queue reference; returns 0 if the queue is already being freed */
static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}
860
861
/*
 * Disconnect and tear down a target queue: abort outstanding IOs,
 * flush deferred commands back to the LLDD as aborted, destroy the
 * nvmet sq/cq, and drop the queue's initial reference.  Only the
 * caller that flips 'connected' from nonzero to zero does the work,
 * so repeated calls are harmless.
 */
static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		/* drop qlock: the LLDD callbacks below may sleep/re-enter */
		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);
	nvmet_cq_put(&queue->nvme_cq);

	/* drop the reference taken at queue allocation (kref_init) */
	nvmet_fc_tgt_q_put(queue);
}
940
/*
 * Map a connection id to its live target queue.  The association list
 * is walked under RCU; on a hit a queue reference is taken (caller
 * must put it).  Returns NULL for an out-of-range qid, an unknown
 * association, a disconnected queue, or one already tearing down.
 */
static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			rcu_read_unlock();
			return queue;
		}
	}
	rcu_read_unlock();
	return NULL;
}
968
/*
 * kref release for a hostport: unlink it from the tgtport, notify the
 * LLDD via host_release if the handle was invalidated, then drop the
 * tgtport reference the hostport held.
 */
static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}
985
/* drop a hostport reference; last put runs nvmet_fc_hostport_free() */
static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}
991
/* take a hostport reference; returns 0 if it is already being freed */
static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}
997
998 static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport * tgtport,void * hosthandle)999 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1000 {
1001 struct nvmet_fc_hostport *host;
1002
1003 lockdep_assert_held(&tgtport->lock);
1004
1005 list_for_each_entry(host, &tgtport->host_list, host_list) {
1006 if (host->hosthandle == hosthandle && !host->invalid) {
1007 if (nvmet_fc_hostport_get(host))
1008 return host;
1009 }
1010 }
1011
1012 return NULL;
1013 }
1014
/*
 * Find or create the hostport for 'hosthandle'.  The lookup runs
 * twice: once before allocating (fast path) and again under the lock
 * after allocating, so a hostport created by a racing caller is
 * reused and the fresh allocation discarded.  Returns NULL when the
 * LLDD provides no hosthandle, or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *match = NULL;
	unsigned long flags;

	/*
	 * Caller holds a reference on tgtport.
	 */

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (match)
		return match;

	newhost = kzalloc_obj(*newhost);
	if (!newhost)
		return ERR_PTR(-ENOMEM);

	/* recheck under the lock in case someone beat us to it */
	spin_lock_irqsave(&tgtport->lock, flags);
	match = nvmet_fc_match_hostport(tgtport, hosthandle);
	if (match) {
		/* new allocation not needed */
		kfree(newhost);
		newhost = match;
	} else {
		/* the hostport pins the tgtport until its last put */
		nvmet_fc_tgtport_get(tgtport);
		newhost->tgtport = tgtport;
		newhost->hosthandle = hosthandle;
		INIT_LIST_HEAD(&newhost->host_list);
		kref_init(&newhost->ref);

		list_add_tail(&newhost->host_list, &tgtport->host_list);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return newhost;
}
1059
/*
 * Deferred association teardown, run on nvmet_wq. Drops a reference on
 * the association and the targetport reference taken in
 * nvmet_fc_schedule_delete_assoc() when the work was queued.
 */
static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
	nvmet_fc_tgtport_put(tgtport);
}
1071
/*
 * Mark @assoc as terminating and queue its deletion on nvmet_wq. The
 * atomic_xchg() on ->terminating makes this idempotent - only the
 * first caller queues the work. A targetport reference is taken for
 * the work item and dropped again if the work was already queued.
 */
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	int terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

	nvmet_fc_tgtport_get(assoc->tgtport);
	if (!queue_work(nvmet_wq, &assoc->del_work))
		nvmet_fc_tgtport_put(assoc->tgtport);
}
1087
1088 static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport * tgtport,u64 association_id)1089 nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
1090 {
1091 struct nvmet_fc_tgt_assoc *a;
1092 bool found = false;
1093
1094 rcu_read_lock();
1095 list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
1096 if (association_id == a->association_id) {
1097 found = true;
1098 break;
1099 }
1100 }
1101 rcu_read_unlock();
1102
1103 return found;
1104 }
1105
/*
 * Allocate and register a new target association on @tgtport.
 *
 * Picks a random association id with the low bytes cleared (those
 * carry the qid - see BYTES_FOR_QID), retrying until the id is unique,
 * then links the association on the tgtport's RCU-protected assoc_list.
 * Returns the association with an initial kref, or NULL if no nvmet
 * port entry is bound to this targetport or on allocation failure.
 */
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc;
	unsigned long flags;
	bool done;
	u64 ran;
	int idx;

	/* nvmet is not configured for this port */
	if (!tgtport->pe)
		return NULL;

	assoc = kzalloc_obj(*assoc);
	if (!assoc)
		return NULL;

	idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	/* may legitimately be NULL when the LLDD passes no hosthandle */
	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_ida;

	/* the association holds a tgtport reference until torn down */
	assoc->tgtport = tgtport;
	nvmet_fc_tgtport_get(tgtport);
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
	atomic_set(&assoc->terminating, 0);

	done = false;
	do {
		/* random id, low qid bytes zeroed */
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		if (!nvmet_fc_assoc_exists(tgtport, ran)) {
			assoc->association_id = ran;
			list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
			done = true;
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	} while (!done);

	return assoc;

out_ida:
	ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
1160
/*
 * kref release handler for an association: delete all remaining
 * queues, send the Disconnect Association LS, complete any pending
 * received Disconnect LS response, and free the association. Runs
 * when the last reference is dropped via nvmet_fc_tgt_a_put().
 */
static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;
	int i;

	/* delete remaining queues, highest qid first */
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			nvmet_fc_delete_target_queue(assoc->queues[i]);
	}

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	/* NOTE(review): assoc->hostport may be NULL (no hosthandle) - verify
	 * nvmet_fc_hostport_put() tolerates that */
	nvmet_fc_hostport_put(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_free(&tgtport->assoc_cnt, assoc->a_id);
	pr_info("{%d:%d}: Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
}
1191
/* Drop an association reference; frees it via nvmet_fc_target_assoc_free(). */
static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}
1197
/* Take an association reference; returns 0 if it is already being freed. */
static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}
1203
/*
 * Unlink @assoc from the targetport and quiesce it: remove it from the
 * RCU-protected assoc_list, wait for lockless readers, then flush each
 * queue's workqueue so all in-flight I/O has been processed. The
 * association itself is freed later through its kref.
 */
static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del_rcu(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* wait for RCU list walkers before tearing down the queues */
	synchronize_rcu();

	/* ensure all in-flight I/Os have been processed */
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		if (assoc->queues[i])
			flush_workqueue(assoc->queues[i]->work_q);
	}

	pr_info("{%d:%d}: Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);

	/* presumably pairs with the get in nvmet_fc_alloc_target_assoc -
	 * TODO(review): confirm pairing */
	nvmet_fc_tgtport_put(tgtport);
}
1228
1229 static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport * tgtport,u64 association_id)1230 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1231 u64 association_id)
1232 {
1233 struct nvmet_fc_tgt_assoc *assoc;
1234 struct nvmet_fc_tgt_assoc *ret = NULL;
1235
1236 rcu_read_lock();
1237 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1238 if (association_id == assoc->association_id) {
1239 ret = assoc;
1240 if (!nvmet_fc_tgt_a_get(assoc))
1241 ret = NULL;
1242 break;
1243 }
1244 }
1245 rcu_read_unlock();
1246
1247 return ret;
1248 }
1249
/*
 * Bind a port entry (nvmet port) to a targetport. Takes a tgtport
 * reference that is held for the lifetime of the binding, caches the
 * port's wwnn/wwpn so a re-registered targetport can be re-bound
 * later, and links the entry on the global portentry list. Caller
 * must hold nvmet_fc_tgtlock.
 */
static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	nvmet_fc_tgtport_get(tgtport);
	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}
1270
1271 static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry * pe)1272 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1273 {
1274 unsigned long flags;
1275
1276 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1277 if (pe->tgtport) {
1278 nvmet_fc_tgtport_put(pe->tgtport);
1279 pe->tgtport->pe = NULL;
1280 }
1281 list_del(&pe->pe_list);
1282 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1283 }
1284
1285 /*
1286 * called when a targetport deregisters. Breaks the relationship
1287 * with the nvmet port, but leaves the port_entry in place so that
1288 * re-registration can resume operation.
1289 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe) {
		/* drop the binding's tgtport reference taken at bind time */
		nvmet_fc_tgtport_put(pe->tgtport);
		pe->tgtport = NULL;
	}
	/* break the targetport side of the relationship as well */
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
1305
1306 /*
1307 * called when a new targetport is registered. Looks in the
1308 * existing nvmet port_entries to see if the nvmet layer is
1309 * configured for the targetport's wwn's. (the targetport existed,
1310 * nvmet configured, the lldd unregistered the tgtport, and is now
1311 * reregistering the same targetport). If so, set the nvmet port
1312 * port entry on the targetport.
1313 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		/* match on the port's wwnn/wwpn cached at bind time */
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			/* hold a reference for the re-established binding */
			if (!nvmet_fc_tgtport_get(tgtport))
				continue;

			/* any prior binding should already be torn down */
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
1335
1336 /**
1337 * nvmet_fc_register_targetport - transport entry point called by an
1338 * LLDD to register the existence of a local
1339 * NVME subsystem FC port.
1340 * @pinfo: pointer to information about the port to be registered
1341 * @template: LLDD entrypoints and operational parameters for the port
1342 * @dev: physical hardware device node port corresponds to. Will be
1343 * used for DMA mappings
 * @portptr: pointer to a target port pointer. Upon success, the routine
 *           will allocate an nvmet_fc_target_port structure and place its
 *           address in the target port pointer. Upon failure, the target
 *           port pointer will be set to NULL.
1348 *
1349 * Returns:
1350 * a completion status. Must be 0 upon success; a negative errno
1351 * (ex: -ENXIO) upon failure.
1352 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	/* reject templates missing a mandatory entrypoint or limit */
	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	/* LLDD private data is carved out directly after the tgtport */
	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	/* hold the device while registered; a NULL dev is allowed */
	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	/* reattach a pre-existing nvmet port entry for these wwn's */
	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
1437
1438
/*
 * kref release handler for a targetport: free the LS iod pool, notify
 * the LLDD via targetport_delete, release the port number and the
 * device reference, and free the record.
 */
static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_free(&nvmet_fc_tgtport_cnt,
		 tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	/* drop the device reference taken at registration */
	put_device(dev);
}
1460
/* Drop a targetport reference; frees it via nvmet_fc_free_tgtport(). */
static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}
1466
/* Take a targetport reference; returns 0 if it is already being freed. */
static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}
1472
/*
 * Schedule deletion of every association on @tgtport. The list is
 * walked under RCU; each association is pinned with a temporary
 * reference only for as long as it takes to queue its delete work.
 */
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	rcu_read_unlock();
}
1487
1488 /**
1489 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1490 * to remove references to a hosthandle for LS's.
1491 *
1492 * The nvmet-fc layer ensures that any references to the hosthandle
1493 * on the targetport are forgotten (set to NULL). The LLDD will
1494 * typically call this when a login with a remote host port has been
1495 * lost, thus LS's for the remote host port are no longer possible.
1496 *
1497 * If an LS request is outstanding to the targetport/hosthandle (or
1498 * issued concurrently with the call to invalidate the host), the
1499 * LLDD is responsible for terminating/aborting the LS and completing
1500 * the LS request. It is recommended that these terminations/aborts
1501 * occur after calling to invalidate the host handle to avoid additional
1502 * retries by the nvmet-fc transport. The nvmet-fc transport may
1503 * continue to reference host handle while it cleans up outstanding
1504 * NVME associations. The nvmet-fc transport will call the
1505 * ops->host_release() callback to notify the LLDD that all references
1506 * are complete and the related host handle can be recovered.
1507 * Note: if there are no references, the callback may be called before
1508 * the invalidate host call returns.
1509 *
1510 * @target_port: pointer to the (registered) target port that a prior
1511 * LS was received on and which supplied the transport the
1512 * hosthandle.
1513 * @hosthandle: the handle (pointer) that represents the host port
1514 * that no longer has connectivity and that LS's should
1515 * no longer be directed to.
1516 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;
	bool noassoc = true;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (assoc->hostport->hosthandle != hosthandle)
			continue;
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		/* mark the hostport so no new LS traffic uses it */
		assoc->hostport->invalid = 1;
		noassoc = false;
		nvmet_fc_schedule_delete_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* if there's nothing to wait for - call the callback */
	if (noassoc && tgtport->ops->host_release)
		tgtport->ops->host_release(hosthandle);
}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1545
1546 /*
1547 * nvmet layer has called to terminate an association
1548 */
/*
 * nvmet fabrics callback: find the association whose admin queue backs
 * @ctrl and schedule its deletion. Scans every registered targetport;
 * the global lock is dropped while each port's associations are walked
 * under RCU, with a tgtport reference pinning the port meanwhile.
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		rcu_read_lock();
		list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
			/* qid 0 (admin queue) carries the ctrl pointer */
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		rcu_read_unlock();

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			/* assoc ref taken above keeps it valid here */
			nvmet_fc_schedule_delete_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
1589
/*
 * Abandon all LS activity on a targetport being unregistered: cancel
 * any not-yet-run LS receive work and tear down outstanding LS request
 * operations that can no longer complete normally.
 */
static void
nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct nvmet_fc_ls_iod *iod;
	int i;

	/* cancel any queued-but-unstarted LS receive work */
	iod = tgtport->iod;
	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++)
		cancel_work(&iod->work);

	/*
	 * After this point the connection is lost and thus any pending
	 * request can't be processed by the normal completion path. This
	 * is likely a request from nvmet_fc_send_ls_req_async.
	 */
	while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list,
			struct nvmet_fc_ls_req_op, lsreq_list))) {
		list_del(&lsop->lsreq_list);

		/*
		 * NOTE(review): !req_queued entries are only unlinked here,
		 * not unmapped/freed - presumably owned by a racing
		 * completion path; verify.
		 */
		if (!lsop->req_queued)
			continue;

		lsreq = &lsop->ls_req;
		fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);
		/* drop the tgtport reference held by the queued request */
		nvmet_fc_tgtport_put(tgtport);
		kfree(lsop);
	}
}
1622
1623 /**
1624 * nvmet_fc_unregister_targetport - transport entry point called by an
1625 * LLDD to deregister/remove a previously
1626 * registered a local NVME subsystem FC port.
1627 * @target_port: pointer to the (registered) target port that is to be
1628 * deregistered.
1629 *
1630 * Returns:
1631 * a completion status. Must be 0 upon success; a negative errno
1632 * (ex: -ENXIO) upon failure.
1633 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	unsigned long flags;

	/* remove from the global list so no new lookups find it */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_portentry_unbind_tgt(tgtport);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	/* wait for the scheduled association deletions to finish */
	flush_workqueue(nvmet_wq);

	nvmet_fc_free_pending_reqs(tgtport);
	/* drop the initial registration reference */
	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1657
1658
1659 /* ********************** FC-NVME LS RCV Handling ************************* */
1660
1661
/*
 * Handle a received Create Association LS: validate the request,
 * allocate the association and its admin queue (qid 0), and format
 * either an accept response carrying the new association/connection
 * ids or a reject on any validation/allocation failure.
 */
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
	struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * was incorrect.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
		  be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(
						tgtport, iod->hosthandle);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue) {
				ret = VERR_QUEUE_ALLOC_FAIL;
				/* drop the initial assoc reference */
				nvmet_fc_tgt_a_put(iod->assoc);
			}
		}
	}

	if (ret) {
		pr_err("{%d}: Create Association LS failed: %s\n",
			tgtport->fc_target_port.port_num,
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	pr_info("{%d:%d}: Association created\n",
		tgtport->fc_target_port.port_num, iod->assoc->a_id);

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	/* admin queue: connection id equals the association id */
	acc->connectid.connection_id = acc->associd.association_id;
}
1751
/*
 * Handle a received Create Connection LS: validate the request, look
 * up the existing association, allocate the requested I/O queue, and
 * format either an accept response carrying the new connection id or
 * a reject (invalid-association vs. logic error) on failure.
 */
static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
	struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/* strict length/descriptor validation per the LS format */
	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
		  be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		pr_err("{%d}: Create Connection LS failed: %s\n",
			tgtport->fc_target_port.port_num,
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}
1837
1838 /*
1839 * Returns true if the LS response is to be transmit
1840 * Returns false if the LS response is to be delayed
1841 */
/*
 * Handle a received Disconnect Association LS.
 *
 * On validation failure a reject is formatted and true is returned so
 * the caller transmits it immediately. On success the accept is
 * formatted but the iod is parked on assoc->rcv_disconn and false is
 * returned: the response is only sent from the association free path,
 * after all I/O is aborted and our own Disconnect LS has gone out. A
 * previously parked Disconnect iod (if any) is answered now with a
 * reject.
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
			&iod->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
			&iod->rspbuf->rsp_dis_assoc;
	struct nvmet_fc_tgt_assoc *assoc = NULL;
	struct nvmet_fc_ls_iod *oldls = NULL;
	unsigned long flags;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association - takes an assoc ref if !NULL */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret || !assoc) {
		pr_err("{%d}: Disconnect LS failed: %s\n",
			tgtport->fc_target_port.port_num,
			validation_errors[ret]);
		iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
				sizeof(*acc), rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format a response */

	iod->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * The rules for LS response says the response cannot
	 * go back until ABTS's have been sent for all outstanding
	 * I/O and a Disconnect Association LS has been sent.
	 * So... save off the Disconnect LS to send the response
	 * later. If there was a prior LS already saved, replace
	 * it with the newer one and send a can't perform reject
	 * on the older one.
	 */
	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = assoc->rcv_disconn;
	assoc->rcv_disconn = iod;
	spin_unlock_irqrestore(&tgtport->lock, flags);

	if (oldls) {
		pr_info("{%d:%d}: Multiple Disconnect Association LS's "
			"received\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
				sizeof(*iod->rspbuf),
				/* ok to use rqst, LS is same */
				rqst->w0.ls_cmd,
				FCNVME_RJT_RC_UNAB,
				FCNVME_RJT_EXP_NONE, 0);
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	}

	/* kick off teardown; drops the ref from find_target_assoc */
	nvmet_fc_schedule_delete_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);

	return false;
}
1922
1923
1924 /* *********************** NVME Ctrl Routines **************************** */
1925
1926
1927 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1928
1929 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1930
/*
 * LLDD completion callback for a transmitted LS response: sync the
 * response DMA buffer back for the CPU, return the iod to the free
 * pool, and drop a targetport reference (taken when the LS was
 * received - TODO(review): confirm pairing).
 */
static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}
1942
/*
 * Hand a formatted LS response to the LLDD for transmission. If the
 * LLDD refuses it, complete the response inline so the iod and the
 * associated references are still released.
 */
static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
1956
1957 /*
1958 * Actual processing routine for received FC-NVME LS Requests from the LLD
1959 */
/*
 * Dispatch a received FC-NVME LS request to its handler and, unless
 * the handler deferred it (Disconnect Association), transmit the
 * formatted response.
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	/* w0 is common to all LS request formats */
	struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
	bool sendrsp = true;

	iod->lsrsp->nvme_fc_private = iod;
	iod->lsrsp->rspbuf = iod->rspbuf;
	iod->lsrsp->rspdma = iod->rspdma;
	iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsrsp->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT_ASSOC:
		/* Terminate a Queue/Connection or the Association */
		sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		/* unrecognized LS command - reject */
		iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
				sizeof(*iod->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	if (sendrsp)
		nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
2003
2004 /*
2005 * Actual processing routine for received FC-NVME LS Requests from the LLD
2006 */
/* Workqueue wrapper: process one received LS request from its iod. */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}
2016
2017
2018 /**
2019 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2020 * upon the reception of a NVME LS request.
2021 *
2022 * The nvmet-fc layer will copy payload to an internal structure for
2023 * processing. As such, upon completion of the routine, the LLDD may
2024 * immediately free/reuse the LS request buffer passed in the call.
2025 *
2026 * If this routine returns error, the LLDD should abort the exchange.
2027 *
2028 * @target_port: pointer to the (registered) target port the LS was
2029 * received on.
2030 * @hosthandle: pointer to the host specific data, gets stored in iod.
2031 * @lsrsp: pointer to a lsrsp structure to be used to reference
2032 * the exchange corresponding to the LS.
2033 * @lsreqbuf: pointer to the buffer containing the LS Request
2034 * @lsreqbuf_len: length, in bytes, of the received LS request
2035 */
2036 int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port * target_port,void * hosthandle,struct nvmefc_ls_rsp * lsrsp,void * lsreqbuf,u32 lsreqbuf_len)2037 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2038 void *hosthandle,
2039 struct nvmefc_ls_rsp *lsrsp,
2040 void *lsreqbuf, u32 lsreqbuf_len)
2041 {
2042 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2043 struct nvmet_fc_ls_iod *iod;
2044 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2045
2046 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2047 pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n",
2048 tgtport->fc_target_port.port_num,
2049 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2050 nvmefc_ls_names[w0->ls_cmd] : "",
2051 lsreqbuf_len);
2052 return -E2BIG;
2053 }
2054
2055 if (!nvmet_fc_tgtport_get(tgtport)) {
2056 pr_info("{%d}: RCV %s LS failed: target deleting\n",
2057 tgtport->fc_target_port.port_num,
2058 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2059 nvmefc_ls_names[w0->ls_cmd] : "");
2060 return -ESHUTDOWN;
2061 }
2062
2063 iod = nvmet_fc_alloc_ls_iod(tgtport);
2064 if (!iod) {
2065 pr_info("{%d}: RCV %s LS failed: context allocation failed\n",
2066 tgtport->fc_target_port.port_num,
2067 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2068 nvmefc_ls_names[w0->ls_cmd] : "");
2069 nvmet_fc_tgtport_put(tgtport);
2070 return -ENOENT;
2071 }
2072
2073 iod->lsrsp = lsrsp;
2074 iod->fcpreq = NULL;
2075 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2076 iod->rqstdatalen = lsreqbuf_len;
2077 iod->hosthandle = hosthandle;
2078
2079 queue_work(nvmet_wq, &iod->work);
2080
2081 return 0;
2082 }
2083 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
2084
2085
2086 /*
2087 * **********************
2088 * Start of FCP handling
2089 * **********************
2090 */
2091
2092 static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod * fod)2093 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2094 {
2095 struct scatterlist *sg;
2096 unsigned int nent;
2097
2098 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2099 if (!sg)
2100 goto out;
2101
2102 fod->data_sg = sg;
2103 fod->data_sg_cnt = nent;
2104 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2105 ((fod->io_dir == NVMET_FCP_WRITE) ?
2106 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2107 /* note: write from initiator perspective */
2108 fod->next_sg = fod->data_sg;
2109
2110 return 0;
2111
2112 out:
2113 return NVME_SC_INTERNAL;
2114 }
2115
2116 static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod * fod)2117 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2118 {
2119 if (!fod->data_sg || !fod->data_sg_cnt)
2120 return;
2121
2122 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2123 ((fod->io_dir == NVMET_FCP_WRITE) ?
2124 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2125 sgl_free(fod->data_sg);
2126 fod->data_sg = NULL;
2127 fod->data_sg_cnt = 0;
2128 }
2129
2130
2131 static bool
queue_90percent_full(struct nvmet_fc_tgt_queue * q,u32 sqhd)2132 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2133 {
2134 u32 sqtail, used;
2135
2136 /* egad, this is ugly. And sqtail is just a best guess */
2137 sqtail = atomic_read(&q->sqtail) % q->sqsize;
2138
2139 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2140 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2141 }
2142
2143 /*
2144 * Prep RSP payload.
2145 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
2146 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	/* raw 32-bit view of the CQE, used to test result words 0/1 */
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	/*
	 * With a combined READDATA_RSP op the whole transfer completes in
	 * this op; otherwise only fod->offset bytes have moved so far.
	 */
	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 * Note: to send a 0's response, the NVME-FC host transport will
	 * recreate the CQE. The host transport knows: sq id, SQHD (last
	 * seen in an ersp), and command_id. Thus it will create a
	 * zero-filled CQE with those known fields filled in. Transport
	 * must send an ersp for any condition where the cqe won't match
	 * this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *    but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    nvme_is_fabrics((struct nvme_command *) sqe) ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		/* short all-zeros response; host reconstructs the CQE */
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		/* full extended response; iu_len is in 32-bit words */
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		/* response sequence number lets the host order ersps */
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	/* make the response visible to the device before transmit */
	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
2209
2210 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2211
2212 static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_fcp_iod * fod)2213 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2214 struct nvmet_fc_fcp_iod *fod)
2215 {
2216 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2217
2218 /* data no longer needed */
2219 nvmet_fc_free_tgt_pgs(fod);
2220
2221 /*
2222 * if an ABTS was received or we issued the fcp_abort early
2223 * don't call abort routine again.
2224 */
2225 /* no need to take lock - lock was taken earlier to get here */
2226 if (!fod->aborted)
2227 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2228
2229 nvmet_fc_free_fcp_iod(fod->queue, fod);
2230 }
2231
2232 static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_fcp_iod * fod)2233 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2234 struct nvmet_fc_fcp_iod *fod)
2235 {
2236 int ret;
2237
2238 fod->fcpreq->op = NVMET_FCOP_RSP;
2239 fod->fcpreq->timeout = 0;
2240
2241 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2242
2243 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2244 if (ret)
2245 nvmet_fc_abort_op(tgtport, fod);
2246 }
2247
static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
	/* bytes of the overall transfer still to be moved */
	u32 remaininglen = fod->req.transfer_len - fod->offset;
	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	/*
	 * for next sequence:
	 *  break at a sg element boundary
	 *  attempt to keep sequence length capped at
	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
	 *    be longer if a single sg element is larger
	 *    than that amount. This is done to avoid creating
	 *    a new sg list to use for the tgtport api.
	 */
	fcpreq->sg = sg;
	fcpreq->sg_cnt = 0;
	while (tlen < remaininglen &&
	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
		fcpreq->sg_cnt++;
		tlen += sg_dma_len(sg);
		sg = sg_next(sg);
	}
	/* a single element exceeding the cap is still sent on its own */
	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
		fcpreq->sg_cnt++;
		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
		sg = sg_next(sg);
	}
	/* remember where the next chunk resumes, if anything remains */
	if (tlen < remaininglen)
		fod->next_sg = sg;
	else
		fod->next_sg = NULL;

	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			/* fail the write back to the nvmet layer */
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			/* fake an errored completion so the done path runs */
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}
2328
2329 static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod * fod,bool abort)2330 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2331 {
2332 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2333 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2334
2335 /* if in the middle of an io and we need to tear down */
2336 if (abort) {
2337 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2338 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2339 return true;
2340 }
2341
2342 nvmet_fc_abort_op(tgtport, fod);
2343 return true;
2344 }
2345
2346 return false;
2347 }
2348
2349 /*
2350 * actual done handler for FCP operations when completed by the lldd
2351 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	/* snapshot the abort flag; any write op is no longer in flight */
	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		/* LLDD error or short transfer: fail the request */
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->abort = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* more host data to pull: mark a write active */
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		fod->req.execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		/* LLDD error or short transfer: abort the exchange */
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* combined data+rsp op: io is completely done */
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		/* response went out; the io context can be recycled */
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}
2442
2443 static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req * fcpreq)2444 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2445 {
2446 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2447
2448 nvmet_fc_fod_op_done(fod);
2449 }
2450
2451 /*
2452 * actual completion handler after execution by the nvmet layer
2453 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	/* snapshot the abort flag under the fod lock */
	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}
2506
2507
2508 static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req * nvme_req)2509 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2510 {
2511 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2512 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2513
2514 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2515 }
2516
2517
2518 /*
2519 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2520 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	/* validate the IU's direction flags against the SQE opcode */
	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		/* no direction flag: a transfer length is inconsistent */
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.cqe = &fod->rspiubuf.cqe;
	/* no nvmet port entry is bound to this targetport */
	if (!tgtport->pe)
		goto transport_error;
	fod->req.port = tgtport->pe->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	fod->req.execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}
2608
2609 /**
2610 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2611 * upon the reception of a NVME FCP CMD IU.
2612 *
2613 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2614 * layer for processing.
2615 *
2616 * The nvmet_fc layer allocates a local job structure (struct
2617 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2618 * CMD IU buffer to the job structure. As such, on a successful
2619 * completion (returns 0), the LLDD may immediately free/reuse
2620 * the CMD IU buffer passed in the call.
2621 *
2622 * However, in some circumstances, due to the packetized nature of FC
2623 * and the api of the FC LLDD which may issue a hw command to send the
2624 * response, but the LLDD may not get the hw completion for that command
2625 * and upcall the nvmet_fc layer before a new command may be
2626 * asynchronously received - it's possible for a command to be received
2627 * before the LLDD and nvmet_fc have recycled the job structure. It gives
2628 * the appearance of more commands received than fits in the sq.
2629 * To alleviate this scenario, a temporary queue is maintained in the
2630 * transport for pending LLDD requests waiting for a queue job structure.
2631 * In these "overrun" cases, a temporary queue element is allocated
2632 * the LLDD request and CMD iu buffer information remembered, and the
2633 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2634 * structure is freed, it is immediately reallocated for anything on the
2635 * pending request list. The LLDDs defer_rcv() callback is called,
2636 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2637 * is then started normally with the transport.
2638 *
2639 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2640 * the completion as successful but must not reuse the CMD IU buffer
2641 * until the LLDD's defer_rcv() callback has been called for the
2642 * corresponding struct nvmefc_tgt_fcp_req pointer.
2643 *
2644 * If there is any other condition in which an error occurs, the
2645 * transport will return a non-zero status indicating the error.
2646 * In all cases other than -EOVERFLOW, the transport has not accepted the
2647 * request and the LLDD should abort the exchange.
2648 *
2649 * @target_port: pointer to the (registered) target port the FCP CMD IU
2650 * was received on.
2651 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2652 * the exchange corresponding to the FCP Exchange.
2653 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2654 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2655 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
			be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		/* own copy of the CMD IU; LLDD may reuse its buffer now */
		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	/* no free fod and the LLDD cannot defer: reject the command */
	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		/* must drop the lock before allocating */
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc_obj(*deferfcp);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
2742 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
2743
2744 /**
2745 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2746 * upon the reception of an ABTS for a FCP command
2747 *
2748 * Notify the transport that an ABTS has been received for a FCP command
2749 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2750 * LLDD believes the command is still being worked on
2751 * (template_ops->fcp_req_release() has not been called).
2752 *
2753 * The transport will wait for any outstanding work (an op to the LLDD,
2754 * which the lldd should complete with error due to the ABTS; or the
2755 * completion from the nvmet layer of the nvme command), then will
2756 * stop processing and call the nvmet_fc_rcv_fcp_req() callback to
2757 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2758 * to the ABTS either after return from this function (assuming any
2759 * outstanding op work has been terminated) or upon the callback being
2760 * called.
2761 *
2762 * @target_port: pointer to the (registered) target port the FCP CMD IU
2763 * was received on.
2764 * @fcpreq: pointer to the fcpreq request structure that corresponds
2765 * to the exchange that received the ABTS.
2766 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		/* fod->flock nests inside queue->qlock */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
2795 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2796
2797
/* decoded node-name/port-name pair from an "nn-...:pn-..." traddr string */
struct nvmet_fc_traddr {
	u64 nn;		/* world wide node name */
	u64 pn;		/* world wide port name */
};
2802
2803 static int
__nvme_fc_parse_u64(substring_t * sstr,u64 * val)2804 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2805 {
2806 u64 token64;
2807
2808 if (match_u64(sstr, &token64))
2809 return -EINVAL;
2810 *val = token64;
2811
2812 return 0;
2813 }
2814
2815 /*
2816 * This routine validates and extracts the WWN's from the TRADDR string.
2817 * As kernel parsers need the 0x to determine number base, universally
2818 * build string to parse with 0x prefix before parsing name strings.
2819 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	/* scratch buffer: "0x" + 16 hex digits + NUL, reused for both names */
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
			"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		/* long form: "nn-0x<hex>:pn-0x<hex>" */
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
	    !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
	    !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
			"pn-", NVME_FC_TRADDR_NNLEN))) {
		/* short form: "nn-<hex>:pn-<hex>" without the 0x prefix */
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	/* prepend "0x" so the kernel parser sees an explicit hex base */
	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
2862
static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_port_entry *pe;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	pe = kzalloc_obj(*pe);
	if (!pe)
		return -ENOMEM;

	/* default: no targetport matched the WWNN/WWPN pair */
	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* skip a targetport that is being torn down */
			if (!nvmet_fc_tgtport_get(tgtport))
				continue;

			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->pe) {
				nvmet_fc_portentry_bind(tgtport, pe, port);
				ret = 0;
			} else
				ret = -EALREADY;

			nvmet_fc_tgtport_put(tgtport);
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	/* binding failed: the port entry was never published, free it */
	if (ret)
		kfree(pe);

	return ret;
}
2914
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_port_entry *pe = port->priv;
	struct nvmet_fc_tgtport *tgtport = NULL;
	unsigned long flags;

	/* take a reference on the bound targetport, if one is still there */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
		tgtport = pe->tgtport;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	/* detach the port entry from the targetport */
	nvmet_fc_portentry_unbind(pe);

	if (tgtport) {
		/* terminate any outstanding associations */
		__nvmet_fc_free_assocs(tgtport);
		nvmet_fc_tgtport_put(tgtport);
	}

	kfree(pe);
}
2937
2938 static void
nvmet_fc_discovery_chg(struct nvmet_port * port)2939 nvmet_fc_discovery_chg(struct nvmet_port *port)
2940 {
2941 struct nvmet_fc_port_entry *pe = port->priv;
2942 struct nvmet_fc_tgtport *tgtport = NULL;
2943 unsigned long flags;
2944
2945 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2946 if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
2947 tgtport = pe->tgtport;
2948 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2949
2950 if (!tgtport)
2951 return;
2952
2953 if (tgtport && tgtport->ops->discovery_event)
2954 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2955
2956 nvmet_fc_tgtport_put(tgtport);
2957 }
2958
static ssize_t
nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_size)
{
	/* sq 0 (the admin queue) leads back to the association */
	struct nvmet_sq *sq = ctrl->sqs[0];
	struct nvmet_fc_tgt_queue *queue =
		container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq);
	struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL;
	struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL;
	u64 wwnn, wwpn;
	ssize_t ret = 0;

	if (!tgtport || !nvmet_fc_tgtport_get(tgtport))
		return -ENODEV;
	if (!hostport || !nvmet_fc_hostport_get(hostport)) {
		ret = -ENODEV;
		goto out_put;
	}

	/*
	 * Ask the LLDD for the remote host's WWNN/WWPN and render them in
	 * traddr format.  NOTE(review): when the LLDD provides no
	 * host_traddr op this returns 0 with traddr untouched - confirm
	 * callers tolerate an empty buffer in that case.
	 */
	if (tgtport->ops->host_traddr) {
		ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn);
		if (ret)
			goto out_put_host;
		ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn);
	}
out_put_host:
	nvmet_fc_hostport_put(hostport);
out_put:
	nvmet_fc_tgtport_put(tgtport);
	return ret;
}
2990
/* fabrics transport ops registered with the nvmet core for FC */
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner = THIS_MODULE,
	.type = NVMF_TRTYPE_FC,
	.msdbd = 1,
	.add_port = nvmet_fc_add_port,
	.remove_port = nvmet_fc_remove_port,
	.queue_response = nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl = nvmet_fc_delete_ctrl,
	.discovery_chg = nvmet_fc_discovery_chg,
	.host_traddr = nvmet_fc_host_traddr,
};
3002
static int __init nvmet_fc_init_module(void)
{
	/* register the FC transport with the nvmet core */
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}
3007
static void __exit nvmet_fc_exit_module(void)
{
	/* ensure any shutdown operation, e.g. delete ctrls have finished */
	flush_workqueue(nvmet_wq);

	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	/* release any ids still held in the targetport ida */
	ida_destroy(&nvmet_fc_tgtport_cnt);
}
3021
3022 module_init(nvmet_fc_init_module);
3023 module_exit(nvmet_fc_exit_module);
3024
3025 MODULE_DESCRIPTION("NVMe target FC transport driver");
3026 MODULE_LICENSE("GPL v2");
3027