1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9
10 #include "../host/nvme.h"
11 #include "../target/nvmet.h"
12 #include <linux/nvme-fc-driver.h>
13 #include <linux/nvme-fc.h>
14
15
/* Bit flags recording which configfs options were present in an input string. */
enum {
	NVMF_OPT_ERR		= 0,		/* match_token() "no match" result */
	NVMF_OPT_WWNN		= 1 << 0,	/* world-wide node name */
	NVMF_OPT_WWPN		= 1 << 1,	/* world-wide port name */
	NVMF_OPT_ROLES		= 1 << 2,	/* port roles */
	NVMF_OPT_FCADDR		= 1 << 3,	/* FC address */
	NVMF_OPT_LPWWNN		= 1 << 4,	/* local port node name */
	NVMF_OPT_LPWWPN		= 1 << 5,	/* local port port name */
};

/* Parsed option values; 'mask' holds the NVMF_OPT_* bits actually seen. */
struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

/* Token table for match_token(); WWNs are parsed as strings and
 * validated/converted separately (see fcloop_verify_addr()).
 */
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
45
/*
 * Validate that a WWN option value has the canonical form: a "0x"
 * prefix followed by exactly NVME_FC_TRADDR_HEXNAMELEN hex characters.
 * Returns 0 when well formed, -EINVAL otherwise.
 */
static int fcloop_verify_addr(substring_t *s)
{
	size_t len = s->to - s->from + 1;

	if (strncmp(s->from, "0x", 2) != 0)
		return -EINVAL;
	if (strnlen(s->from, len) != NVME_FC_TRADDR_HEXNAMELEN + 2)
		return -EINVAL;

	return 0;
}
56
57 static int
fcloop_parse_options(struct fcloop_ctrl_options * opts,const char * buf)58 fcloop_parse_options(struct fcloop_ctrl_options *opts,
59 const char *buf)
60 {
61 substring_t args[MAX_OPT_ARGS];
62 char *options, *o, *p;
63 int token, ret = 0;
64 u64 token64;
65
66 options = o = kstrdup(buf, GFP_KERNEL);
67 if (!options)
68 return -ENOMEM;
69
70 while ((p = strsep(&o, ",\n")) != NULL) {
71 if (!*p)
72 continue;
73
74 token = match_token(p, opt_tokens, args);
75 opts->mask |= token;
76 switch (token) {
77 case NVMF_OPT_WWNN:
78 if (fcloop_verify_addr(args) ||
79 match_u64(args, &token64)) {
80 ret = -EINVAL;
81 goto out_free_options;
82 }
83 opts->wwnn = token64;
84 break;
85 case NVMF_OPT_WWPN:
86 if (fcloop_verify_addr(args) ||
87 match_u64(args, &token64)) {
88 ret = -EINVAL;
89 goto out_free_options;
90 }
91 opts->wwpn = token64;
92 break;
93 case NVMF_OPT_ROLES:
94 if (match_int(args, &token)) {
95 ret = -EINVAL;
96 goto out_free_options;
97 }
98 opts->roles = token;
99 break;
100 case NVMF_OPT_FCADDR:
101 if (match_hex(args, &token)) {
102 ret = -EINVAL;
103 goto out_free_options;
104 }
105 opts->fcaddr = token;
106 break;
107 case NVMF_OPT_LPWWNN:
108 if (fcloop_verify_addr(args) ||
109 match_u64(args, &token64)) {
110 ret = -EINVAL;
111 goto out_free_options;
112 }
113 opts->lpwwnn = token64;
114 break;
115 case NVMF_OPT_LPWWPN:
116 if (fcloop_verify_addr(args) ||
117 match_u64(args, &token64)) {
118 ret = -EINVAL;
119 goto out_free_options;
120 }
121 opts->lpwwpn = token64;
122 break;
123 default:
124 pr_warn("unknown parameter or missing value '%s'\n", p);
125 ret = -EINVAL;
126 goto out_free_options;
127 }
128 }
129
130 out_free_options:
131 kfree(options);
132 return ret;
133 }
134
135
/*
 * Parse a "wwnn=0x...,wwpn=0x..." string into @nname/@pname.  Both
 * values are mandatory.  Returns 0 on success, -EINVAL if either value
 * is missing or malformed, -ENOMEM on allocation failure.  @dev is
 * currently unused.
 */
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	/* all-ones marks "not supplied yet" for the mandatory checks below */
	*nname = -1;
	*pname = -1;

	/* strsep() consumes its input, so work on a private copy */
	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		/* both wwnn and wwpn must have been supplied */
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
193
194
/* Option sets accepted when creating each kind of emulated port. */
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


/* fcloop_lock protects the two global port lists below. */
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

/* Emulated local (initiator-side) FC port. */
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;	/* entry on fcloop_lports */
	refcount_t ref;
};

/* Private area handed to nvme-fc for a registered localport. */
struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

/* The port is already being removed, avoid double free */
#define PORT_DELETED	0

/* Host-side view of the remote (target) port. */
struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;	/* NULL if target gone */
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;		/* protects ls_list */
	struct list_head		ls_list;	/* pending LS completions */
	struct work_struct		ls_work;	/* drains ls_list */
	unsigned long			flags;		/* PORT_DELETED bit */
};

/* Target-side view of the connection; mirrors fcloop_rport. */
struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;	/* NULL if host gone */
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;		/* protects ls_list */
	struct list_head		ls_list;	/* pending LS completions */
	struct work_struct		ls_work;	/* drains ls_list */
	unsigned long			flags;		/* PORT_DELETED bit */
};

/* An emulated fabric endpoint tying together rport/tport/lport. */
struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;	/* entry on fcloop_nports */
	refcount_t ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

/* Tracks one in-flight LS (link service) exchange. */
struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				status;		/* completion status for done() */
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

/* Deferred RSCN (rescan) event delivered via workqueue. */
struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

/* Initiator-side I/O life-cycle states (fcloop_fcpreq.inistate). */
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

/* Target-side tracking of one FCP I/O; refcounted because the
 * initiator abort path and the target completion path can race.
 */
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;	/* NULL once detached */
	spinlock_t			reqlock;	/* protects state below */
	u16				status;
	u32				inistate;	/* INI_IO_* */
	bool				active;		/* fcp_op in progress */
	bool				aborted;	/* target-side abort seen */
	refcount_t			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

/* Initiator-side per-request private data (lives in the nvme-fc req). */
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;	/* NULL once completed/aborted */
	spinlock_t			inilock;	/* protects tfcp_req */
};

/* SLAB cache for fcloop_lsreq structures */
static struct kmem_cache *lsreq_cache;
296
/* Map an LS response back to its containing fcloop_lsreq. */
static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

/* Map a target-side FCP request back to its containing fcloop_fcpreq. */
static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
308
309
/*
 * nvme-fc queue hooks: fcloop has no real hardware queues, so creation
 * just hands back the localport as an opaque handle and deletion is a
 * no-op.
 */
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
324
325 static void
fcloop_rport_lsrqst_work(struct work_struct * work)326 fcloop_rport_lsrqst_work(struct work_struct *work)
327 {
328 struct fcloop_rport *rport =
329 container_of(work, struct fcloop_rport, ls_work);
330 struct fcloop_lsreq *tls_req;
331
332 spin_lock(&rport->lock);
333 for (;;) {
334 tls_req = list_first_entry_or_null(&rport->ls_list,
335 struct fcloop_lsreq, ls_list);
336 if (!tls_req)
337 break;
338
339 list_del(&tls_req->ls_list);
340 spin_unlock(&rport->lock);
341
342 tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
343 /*
344 * callee may free memory containing tls_req.
345 * do not reference lsreq after this.
346 */
347 kmem_cache_free(lsreq_cache, tls_req);
348
349 spin_lock(&rport->lock);
350 }
351 spin_unlock(&rport->lock);
352 }
353
/*
 * Host-to-target LS request.  If a target port is connected the request
 * is handed to nvmet-fc; otherwise a -ECONNREFUSED completion is queued
 * on the rport's LS work list.
 *
 * Fix: the t2h counterpart (fcloop_t2h_ls_req) frees its tracking
 * structure when the receive call fails, but this path leaked it.  On
 * nvmet_fc_rcv_ls_req() failure the target never calls xmt_ls_rsp, so
 * nothing else would ever free tls_req — free it here.
 */
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_lsreq *tls_req;
	int ret = 0;

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		/* no target: queue deferred -ECONNREFUSED completion */
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);
	if (ret)
		kmem_cache_free(lsreq_cache, tls_req);

	return ret;
}
385
/*
 * Target transmits the response for a host-to-target LS exchange.
 * Copies the response payload (bounded by the smaller of the two
 * buffers) into the host's buffer, releases the target-side response
 * context via lsrsp->done(), then queues the host-side completion on
 * the rport's LS work list.  If the host's remoteport is already gone,
 * the tracking structure is freed immediately.
 */
static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	/* copy no more than either side's buffer can hold */
	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (!remoteport) {
		/* host side is gone: nobody will run the LS work, free now */
		kmem_cache_free(lsreq_cache, tls_req);
		return 0;
	}

	rport = remoteport->private;
	spin_lock(&rport->lock);
	list_add_tail(&tls_req->ls_list, &rport->ls_list);
	spin_unlock(&rport->lock);
	queue_work(nvmet_wq, &rport->ls_work);

	return 0;
}
415
416 static void
fcloop_tport_lsrqst_work(struct work_struct * work)417 fcloop_tport_lsrqst_work(struct work_struct *work)
418 {
419 struct fcloop_tport *tport =
420 container_of(work, struct fcloop_tport, ls_work);
421 struct fcloop_lsreq *tls_req;
422
423 spin_lock(&tport->lock);
424 for (;;) {
425 tls_req = list_first_entry_or_null(&tport->ls_list,
426 struct fcloop_lsreq, ls_list);
427 if (!tls_req)
428 break;
429
430 list_del(&tls_req->ls_list);
431 spin_unlock(&tport->lock);
432
433 tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
434 /*
435 * callee may free memory containing tls_req.
436 * do not reference lsreq after this.
437 */
438 kmem_cache_free(lsreq_cache, tls_req);
439
440 spin_lock(&tport->lock);
441 }
442 spin_unlock(&tport->lock);
443 }
444
/*
 * Target-to-host LS request.  Hands the request to the nvme-fc host
 * side if a remoteport is connected; otherwise queues a deferred
 * -ECONNREFUSED completion on the tport's LS work list.
 */
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_tport *tport = targetport->private;
	struct fcloop_lsreq *tls_req;
	int ret;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		/* no host: queue deferred -ECONNREFUSED completion */
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return 0;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);
	if (ret)
		kmem_cache_free(lsreq_cache, tls_req);

	return ret;
}
483
/*
 * Host transmits the response for a target-to-host LS exchange.
 * Normal path: copy the response (bounded by the smaller buffer),
 * release the host-side response context, and queue the target-side
 * completion on the tport's LS work list.  If the target port has
 * already gone away, the exchange is dropped (see comment below).
 */
static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;
	int ret = 0;

	if (!targetport) {
		/*
		 * The target port is gone. The target doesn't expect any
		 * response anymore and thus lsreq can't be accessed anymore.
		 *
		 * We end up here from delete association exchange:
		 * nvmet_fc_xmt_disconnect_assoc sends an async request.
		 *
		 * Return success when remoteport is still online because this
		 * is what LLDDs do and silently drop the response. Otherwise,
		 * return with error to signal upper layer to perform the lsrsp
		 * resource cleanup.
		 */
		if (remoteport->port_state == FC_OBJSTATE_ONLINE)
			lsrsp->done(lsrsp);
		else
			ret = -ENODEV;

		kmem_cache_free(lsreq_cache, tls_req);
		return ret;
	}

	/* copy no more than either side's buffer can hold */
	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	tport = targetport->private;
	spin_lock(&tport->lock);
	list_add_tail(&tls_req->ls_list, &tport->ls_list);
	spin_unlock(&tport->lock);
	queue_work(nvmet_wq, &tport->ls_work);

	return 0;
}
531
/* Target transport released its host handle; nothing to clean up in fcloop. */
static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}
537
538 static int
fcloop_t2h_host_traddr(void * hosthandle,u64 * wwnn,u64 * wwpn)539 fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
540 {
541 struct fcloop_rport *rport = hosthandle;
542
543 *wwnn = rport->lport->localport->node_name;
544 *wwpn = rport->lport->localport->port_name;
545 return 0;
546 }
547
548 /*
549 * Simulate reception of RSCN and converting it to a initiator transport
550 * call to rescan a remote port.
551 */
552 static void
fcloop_tgt_rscn_work(struct work_struct * work)553 fcloop_tgt_rscn_work(struct work_struct *work)
554 {
555 struct fcloop_rscn *tgt_rscn =
556 container_of(work, struct fcloop_rscn, work);
557 struct fcloop_tport *tport = tgt_rscn->tport;
558
559 if (tport->remoteport)
560 nvme_fc_rescan_remoteport(tport->remoteport);
561 kfree(tgt_rscn);
562 }
563
564 static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port * tgtport)565 fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
566 {
567 struct fcloop_rscn *tgt_rscn;
568
569 tgt_rscn = kzalloc_obj(*tgt_rscn);
570 if (!tgt_rscn)
571 return;
572
573 tgt_rscn->tport = tgtport->private;
574 INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
575
576 queue_work(nvmet_wq, &tgt_rscn->work);
577 }
578
579 static void
fcloop_tfcp_req_put(struct fcloop_fcpreq * tfcp_req)580 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
581 {
582 if (!refcount_dec_and_test(&tfcp_req->ref))
583 return;
584
585 kfree(tfcp_req);
586 }
587
588 static int
fcloop_tfcp_req_get(struct fcloop_fcpreq * tfcp_req)589 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
590 {
591 return refcount_inc_not_zero(&tfcp_req->ref);
592 }
593
594 static void
fcloop_call_host_done(struct nvmefc_fcp_req * fcpreq,struct fcloop_fcpreq * tfcp_req,int status)595 fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
596 struct fcloop_fcpreq *tfcp_req, int status)
597 {
598 struct fcloop_ini_fcpreq *inireq = NULL;
599
600 if (fcpreq) {
601 inireq = fcpreq->private;
602 spin_lock(&inireq->inilock);
603 inireq->tfcp_req = NULL;
604 spin_unlock(&inireq->inilock);
605
606 fcpreq->status = status;
607 fcpreq->done(fcpreq);
608 }
609
610 /* release original io reference on tgt struct */
611 if (tfcp_req)
612 fcloop_tfcp_req_put(tfcp_req);
613 }
614
/*
 * Command-drop fault injection knobs.  Presumably set by configfs/debug
 * attributes outside this chunk — TODO confirm against the rest of the
 * file.  drop_opcode == -1 means injection is disabled.
 */
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	/* injection disabled */
	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	/*
	 * Match either a fabrics sub-command (fctype) or a plain opcode,
	 * depending on drop_fabric_opcode.
	 */
	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	/*
	 * Drop matching commands numbered [drop_instance,
	 * drop_instance + drop_amount); disable injection once the
	 * window has been exhausted.
	 */
	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}
658
/*
 * Work item: deliver a host-submitted FCP command to the target port.
 * Queued by fcloop_fcp_req(); transitions the I/O to ACTIVE unless an
 * abort raced in first.
 */
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	/* START -> ACTIVE, or detect that an abort already happened */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted)) {
		/* the abort handler will call fcloop_call_host_done */
		return;
	}

	/* fault-injection hook: silently drop the command if configured */
	if (unlikely(check_for_drop(tfcp_req))) {
		pr_info("%s: dropped command ********\n", __func__);
		return;
	}

	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				   &tfcp_req->tgt_fcp_req,
				   fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		/* target refused the command: complete back to the host */
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
701
/*
 * Work item: deliver an initiator abort to the target side.  Queued by
 * fcloop_fcp_abort() with an extra reference on tfcp_req, which is
 * dropped either here (already-completed case) or by
 * fcloop_call_host_done().
 */
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		/* detach the initiator request; we own its completion now */
		fcpreq = tfcp_req->fcpreq;
		tfcp_req->fcpreq = NULL;
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		fcloop_tfcp_req_put(tfcp_req);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}
741
742 /*
743 * FCP IO operation done by target completion.
744 * call back up initiator "done" flows.
745 */
746 static void
fcloop_tgt_fcprqst_done_work(struct work_struct * work)747 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
748 {
749 struct fcloop_fcpreq *tfcp_req =
750 container_of(work, struct fcloop_fcpreq, tio_done_work);
751 struct nvmefc_fcp_req *fcpreq;
752 unsigned long flags;
753
754 spin_lock_irqsave(&tfcp_req->reqlock, flags);
755 fcpreq = tfcp_req->fcpreq;
756 tfcp_req->inistate = INI_IO_COMPLETED;
757 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
758
759 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
760 }
761
762
/*
 * Host submits an FCP I/O.  Allocates a target-side tracking struct,
 * links it to the initiator request, and queues work to hand the
 * command to the target.  Returns -ECONNREFUSED if no target port is
 * connected.
 */
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	/* GFP_ATOMIC: presumably callable from atomic context — confirm */
	tfcp_req = kzalloc_obj(*tfcp_req, GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	/* link the initiator request to the target-side tracking struct */
	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	/* original I/O reference, dropped by fcloop_call_host_done() */
	refcount_set(&tfcp_req->ref, 1);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}
797
/*
 * Copy @length bytes between the target's data scatterlist (@data_sg)
 * and the initiator's I/O scatterlist (@io_sg), starting @offset bytes
 * into the I/O side.  Direction depends on @op: WRITEDATA copies
 * io -> data, everything else data -> io.  Both lists must cover the
 * requested range; segment boundaries need not align.
 */
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* first, walk the io list to skip the initial @offset bytes */
	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	/* copy chunk by chunk, advancing whichever segment runs out */
	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
851
/*
 * Target data-movement/response operation for an FCP I/O.  Emulates the
 * wire transfer by memcpy between the two scatterlists, then invokes
 * the target's done() callback.  Returns -EALREADY if called while a
 * previous op on the same request is still active.
 */
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	/* snapshot state and mark this op in-flight atomically */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			/* copy at most what the initiator buffer can hold */
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
945
/*
 * Target-side abort of an FCP I/O: flag the request as aborted so any
 * subsequently issued fcp_op completes with -ECANCELED.
 */
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

/*
 * Target released the FCP request: defer the initiator-side completion
 * to workqueue context.
 */
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
979
/* LS aborts are not emulated by fcloop; both directions are no-ops. */
static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}
992
/*
 * Initiator aborts an FCP I/O.  Takes an extra reference on the
 * target-side tracking struct (released by the abort work or here on
 * the error paths), marks the I/O aborted, and queues the abort work
 * unless the I/O already completed.
 */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	/* look up and pin the target-side struct, if still linked */
	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req) {
		if (!fcloop_tfcp_req_get(tfcp_req))
			tfcp_req = NULL;
	}
	spin_unlock(&inireq->inilock);

	if (!tfcp_req) {
		/* abort has already been called */
		goto out_host_done;
	}

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		goto out_host_done;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}

	return;

out_host_done:
	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}
1050
1051 static void
fcloop_lport_put(struct fcloop_lport * lport)1052 fcloop_lport_put(struct fcloop_lport *lport)
1053 {
1054 unsigned long flags;
1055
1056 if (!refcount_dec_and_test(&lport->ref))
1057 return;
1058
1059 spin_lock_irqsave(&fcloop_lock, flags);
1060 list_del(&lport->lport_list);
1061 spin_unlock_irqrestore(&fcloop_lock, flags);
1062
1063 kfree(lport);
1064 }
1065
1066 static int
fcloop_lport_get(struct fcloop_lport * lport)1067 fcloop_lport_get(struct fcloop_lport *lport)
1068 {
1069 return refcount_inc_not_zero(&lport->ref);
1070 }
1071
1072 static void
fcloop_nport_put(struct fcloop_nport * nport)1073 fcloop_nport_put(struct fcloop_nport *nport)
1074 {
1075 unsigned long flags;
1076
1077 if (!refcount_dec_and_test(&nport->ref))
1078 return;
1079
1080 spin_lock_irqsave(&fcloop_lock, flags);
1081 list_del(&nport->nport_list);
1082 spin_unlock_irqrestore(&fcloop_lock, flags);
1083
1084 if (nport->lport)
1085 fcloop_lport_put(nport->lport);
1086
1087 kfree(nport);
1088 }
1089
1090 static int
fcloop_nport_get(struct fcloop_nport * nport)1091 fcloop_nport_get(struct fcloop_nport *nport)
1092 {
1093 return refcount_inc_not_zero(&nport->ref);
1094 }
1095
1096 static void
fcloop_localport_delete(struct nvme_fc_local_port * localport)1097 fcloop_localport_delete(struct nvme_fc_local_port *localport)
1098 {
1099 struct fcloop_lport_priv *lport_priv = localport->private;
1100 struct fcloop_lport *lport = lport_priv->lport;
1101
1102 fcloop_lport_put(lport);
1103 }
1104
/*
 * nvme-fc callback: remoteport fully unregistered.  Clears the nport's
 * back-pointer and, exactly once (PORT_DELETED guard), drops the nport
 * reference held by this rport.
 */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;
	bool put_port = false;
	unsigned long flags;

	/* make sure any queued LS completions have drained */
	flush_work(&rport->ls_work);

	spin_lock_irqsave(&fcloop_lock, flags);
	if (!test_and_set_bit(PORT_DELETED, &rport->flags))
		put_port = true;
	rport->nport->rport = NULL;
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (put_port) {
		WARN_ON(!list_empty(&rport->ls_list));
		fcloop_nport_put(rport->nport);
	}
}
1125
/*
 * nvmet-fc callback: targetport fully unregistered.  Mirrors
 * fcloop_remoteport_delete() on the target side.
 */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;
	bool put_port = false;
	unsigned long flags;

	/* make sure any queued LS completions have drained */
	flush_work(&tport->ls_work);

	spin_lock_irqsave(&fcloop_lock, flags);
	if (!test_and_set_bit(PORT_DELETED, &tport->flags))
		put_port = true;
	tport->nport->tport = NULL;
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (put_port) {
		WARN_ON(!list_empty(&tport->ls_list));
		fcloop_nport_put(tport->nport);
	}
}
1146
/* fake hardware limits advertised by the loopback "HBA" to both sides */
#define FCLOOP_HW_QUEUES 4
#define FCLOOP_SGL_SEGS 256
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF

/* host-side (initiator) LLDD template, all ops routed to fcloop handlers */
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
1170
/* target-side (nvmet) LLDD template, all ops routed to fcloop handlers */
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.host_traddr		= fcloop_t2h_host_traddr,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
1191
1192 static ssize_t
fcloop_create_local_port(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1193 fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
1194 const char *buf, size_t count)
1195 {
1196 struct nvme_fc_port_info pinfo;
1197 struct fcloop_ctrl_options *opts;
1198 struct nvme_fc_local_port *localport;
1199 struct fcloop_lport *lport;
1200 struct fcloop_lport_priv *lport_priv;
1201 unsigned long flags;
1202 int ret = -ENOMEM;
1203
1204 lport = kzalloc_obj(*lport);
1205 if (!lport)
1206 return -ENOMEM;
1207
1208 opts = kzalloc_obj(*opts);
1209 if (!opts)
1210 goto out_free_lport;
1211
1212 ret = fcloop_parse_options(opts, buf);
1213 if (ret)
1214 goto out_free_opts;
1215
1216 /* everything there ? */
1217 if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
1218 ret = -EINVAL;
1219 goto out_free_opts;
1220 }
1221
1222 memset(&pinfo, 0, sizeof(pinfo));
1223 pinfo.node_name = opts->wwnn;
1224 pinfo.port_name = opts->wwpn;
1225 pinfo.port_role = opts->roles;
1226 pinfo.port_id = opts->fcaddr;
1227
1228 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
1229 if (!ret) {
1230 /* success */
1231 lport_priv = localport->private;
1232 lport_priv->lport = lport;
1233
1234 lport->localport = localport;
1235 INIT_LIST_HEAD(&lport->lport_list);
1236 refcount_set(&lport->ref, 1);
1237
1238 spin_lock_irqsave(&fcloop_lock, flags);
1239 list_add_tail(&lport->lport_list, &fcloop_lports);
1240 spin_unlock_irqrestore(&fcloop_lock, flags);
1241 }
1242
1243 out_free_opts:
1244 kfree(opts);
1245 out_free_lport:
1246 /* free only if we're going to fail */
1247 if (ret)
1248 kfree(lport);
1249
1250 return ret ? ret : count;
1251 }
1252
/* Unregister the nvme_fc localport backing @lport; returns transport status. */
static int
__localport_unreg(struct fcloop_lport *lport)
{
	return nvme_fc_unregister_localport(lport->localport);
}
1258
1259 static struct fcloop_nport *
__fcloop_nport_lookup(u64 node_name,u64 port_name)1260 __fcloop_nport_lookup(u64 node_name, u64 port_name)
1261 {
1262 struct fcloop_nport *nport;
1263
1264 list_for_each_entry(nport, &fcloop_nports, nport_list) {
1265 if (nport->node_name != node_name ||
1266 nport->port_name != port_name)
1267 continue;
1268
1269 if (fcloop_nport_get(nport))
1270 return nport;
1271
1272 break;
1273 }
1274
1275 return NULL;
1276 }
1277
1278 static struct fcloop_nport *
fcloop_nport_lookup(u64 node_name,u64 port_name)1279 fcloop_nport_lookup(u64 node_name, u64 port_name)
1280 {
1281 struct fcloop_nport *nport;
1282 unsigned long flags;
1283
1284 spin_lock_irqsave(&fcloop_lock, flags);
1285 nport = __fcloop_nport_lookup(node_name, port_name);
1286 spin_unlock_irqrestore(&fcloop_lock, flags);
1287
1288 return nport;
1289 }
1290
1291 static struct fcloop_lport *
__fcloop_lport_lookup(u64 node_name,u64 port_name)1292 __fcloop_lport_lookup(u64 node_name, u64 port_name)
1293 {
1294 struct fcloop_lport *lport;
1295
1296 list_for_each_entry(lport, &fcloop_lports, lport_list) {
1297 if (lport->localport->node_name != node_name ||
1298 lport->localport->port_name != port_name)
1299 continue;
1300
1301 if (fcloop_lport_get(lport))
1302 return lport;
1303
1304 break;
1305 }
1306
1307 return NULL;
1308 }
1309
1310 static struct fcloop_lport *
fcloop_lport_lookup(u64 node_name,u64 port_name)1311 fcloop_lport_lookup(u64 node_name, u64 port_name)
1312 {
1313 struct fcloop_lport *lport;
1314 unsigned long flags;
1315
1316 spin_lock_irqsave(&fcloop_lock, flags);
1317 lport = __fcloop_lport_lookup(node_name, port_name);
1318 spin_unlock_irqrestore(&fcloop_lock, flags);
1319
1320 return lport;
1321 }
1322
1323 static ssize_t
fcloop_delete_local_port(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1324 fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
1325 const char *buf, size_t count)
1326 {
1327 struct fcloop_lport *lport;
1328 u64 nodename, portname;
1329 int ret;
1330
1331 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1332 if (ret)
1333 return ret;
1334
1335 lport = fcloop_lport_lookup(nodename, portname);
1336 if (!lport)
1337 return -ENOENT;
1338
1339 ret = __localport_unreg(lport);
1340 fcloop_lport_put(lport);
1341
1342 return ret ? ret : count;
1343 }
1344
/*
 * Parse @buf and return an nport (with a reference held) suitable for
 * attaching a remote port (@remoteport true) or a target port.  An
 * existing nport with the same wwnn/wwpn is reused if its corresponding
 * slot (rport/tport) is free; otherwise a freshly allocated one is added
 * to the global list.  Returns NULL on any parse/configuration error.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport;
	struct fcloop_lport *lport;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc_obj(*opts);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* all mandatory options for this port type must be present */
	if ((opts->mask & opts_mask) != opts_mask)
		goto out_free_opts;

	/* allocate the candidate nport before taking the lock */
	newnport = kzalloc_obj(*newnport);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	refcount_set(&newnport->ref, 1);

	spin_lock_irqsave(&fcloop_lock, flags);
	/* an nport must not share its wwnn/wwpn with an existing lport */
	lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
	if (lport) {
		/* invalid configuration */
		fcloop_lport_put(lport);
		goto out_free_newnport;
	}

	if (remoteport) {
		/* a remote port must name an existing local port to attach to */
		lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
		if (!lport) {
			/* invalid configuration */
			goto out_free_newnport;
		}
	}

	nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
	if (nport) {
		/* refuse to overwrite an already-occupied rport/tport slot */
		if ((remoteport && nport->rport) ||
		    (!remoteport && nport->tport)) {
			/* invalid configuration */
			goto out_put_nport;
		}

		/* found existing nport, discard the new nport */
		kfree(newnport);
	} else {
		list_add_tail(&newnport->nport_list, &fcloop_nports);
		nport = newnport;
	}

	/* (re)apply optional attributes to whichever nport we ended up with */
	if (opts->mask & NVMF_OPT_ROLES)
		nport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		nport->port_id = opts->fcaddr;
	if (lport) {
		/* nport keeps the lport reference; drop it if already linked */
		if (!nport->lport)
			nport->lport = lport;
		else
			fcloop_lport_put(lport);
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return nport;

out_put_nport:
	if (lport)
		fcloop_lport_put(lport);
	/*
	 * NOTE(review): this put happens with fcloop_lock held; it looks like
	 * the list's own reference keeps the count above zero here so the
	 * re-locking free path in fcloop_nport_put() cannot trigger — confirm.
	 */
	fcloop_nport_put(nport);
out_free_newnport:
	spin_unlock_irqrestore(&fcloop_lock, flags);
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return NULL;
}
1437
/*
 * Write handler for "add_remote_port": create (or reuse) an nport for the
 * wwnn/wwpn in @buf and register an nvme_fc remoteport on the lport named
 * by lpwwnn/lpwwpn.  Returns @count on success or a negative errno.
 */
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	/* returns an nport with a reference held, or NULL on bad input */
	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success: cross-wire the rport with the nport and any existing tport */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	/*
	 * NOTE(review): the links below are set up without fcloop_lock;
	 * presumably concurrent add/del of the same port pair is serialized
	 * elsewhere (or tolerated) — confirm against the locking rules.
	 */
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	rport->flags = 0;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}
1483
1484
1485 static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport * nport)1486 __unlink_remote_port(struct fcloop_nport *nport)
1487 {
1488 struct fcloop_rport *rport = nport->rport;
1489
1490 lockdep_assert_held(&fcloop_lock);
1491
1492 if (rport && nport->tport)
1493 nport->tport->remoteport = NULL;
1494 nport->rport = NULL;
1495
1496 return rport;
1497 }
1498
/*
 * Unregister the nvme_fc remoteport backing @rport.  @nport is unused
 * here; the signature parallels __targetport_unreg().
 */
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	return nvme_fc_unregister_remoteport(rport->remoteport);
}
1504
1505 static ssize_t
fcloop_delete_remote_port(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1506 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1507 const char *buf, size_t count)
1508 {
1509 struct fcloop_nport *nport;
1510 struct fcloop_rport *rport;
1511 u64 nodename, portname;
1512 unsigned long flags;
1513 int ret;
1514
1515 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1516 if (ret)
1517 return ret;
1518
1519 nport = fcloop_nport_lookup(nodename, portname);
1520 if (!nport)
1521 return -ENOENT;
1522
1523 spin_lock_irqsave(&fcloop_lock, flags);
1524 rport = __unlink_remote_port(nport);
1525 spin_unlock_irqrestore(&fcloop_lock, flags);
1526
1527 if (!rport) {
1528 ret = -ENOENT;
1529 goto out_nport_put;
1530 }
1531
1532 ret = __remoteport_unreg(nport, rport);
1533
1534 out_nport_put:
1535 fcloop_nport_put(nport);
1536
1537 return ret ? ret : count;
1538 }
1539
1540 static ssize_t
fcloop_create_target_port(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1541 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1542 const char *buf, size_t count)
1543 {
1544 struct nvmet_fc_target_port *targetport;
1545 struct fcloop_nport *nport;
1546 struct fcloop_tport *tport;
1547 struct nvmet_fc_port_info tinfo;
1548 int ret;
1549
1550 nport = fcloop_alloc_nport(buf, count, false);
1551 if (!nport)
1552 return -EIO;
1553
1554 tinfo.node_name = nport->node_name;
1555 tinfo.port_name = nport->port_name;
1556 tinfo.port_id = nport->port_id;
1557
1558 ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1559 &targetport);
1560 if (ret) {
1561 fcloop_nport_put(nport);
1562 return ret;
1563 }
1564
1565 /* success */
1566 tport = targetport->private;
1567 tport->targetport = targetport;
1568 tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
1569 if (nport->rport)
1570 nport->rport->targetport = targetport;
1571 tport->nport = nport;
1572 tport->lport = nport->lport;
1573 nport->tport = tport;
1574 tport->flags = 0;
1575 spin_lock_init(&tport->lock);
1576 INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
1577 INIT_LIST_HEAD(&tport->ls_list);
1578
1579 return count;
1580 }
1581
1582
1583 static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport * nport)1584 __unlink_target_port(struct fcloop_nport *nport)
1585 {
1586 struct fcloop_tport *tport = nport->tport;
1587
1588 lockdep_assert_held(&fcloop_lock);
1589
1590 if (tport && nport->rport)
1591 nport->rport->targetport = NULL;
1592 nport->tport = NULL;
1593
1594 return tport;
1595 }
1596
/*
 * Unregister the nvmet_fc targetport backing @tport.  @nport is unused
 * here; the signature parallels __remoteport_unreg().
 */
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	return nvmet_fc_unregister_targetport(tport->targetport);
}
1602
1603 static ssize_t
fcloop_delete_target_port(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1604 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1606 {
1607 struct fcloop_nport *nport;
1608 struct fcloop_tport *tport;
1609 u64 nodename, portname;
1610 unsigned long flags;
1611 int ret;
1612
1613 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1614 if (ret)
1615 return ret;
1616
1617 nport = fcloop_nport_lookup(nodename, portname);
1618 if (!nport)
1619 return -ENOENT;
1620
1621 spin_lock_irqsave(&fcloop_lock, flags);
1622 tport = __unlink_target_port(nport);
1623 spin_unlock_irqrestore(&fcloop_lock, flags);
1624
1625 if (!tport) {
1626 ret = -ENOENT;
1627 goto out_nport_put;
1628 }
1629
1630 ret = __targetport_unreg(nport, tport);
1631
1632 out_nport_put:
1633 fcloop_nport_put(nport);
1634
1635 return ret ? ret : count;
1636 }
1637
1638 static ssize_t
fcloop_set_cmd_drop(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1639 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
1640 const char *buf, size_t count)
1641 {
1642 unsigned int opcode;
1643 int starting, amount;
1644
1645 if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
1646 return -EBADRQC;
1647
1648 drop_current_cnt = 0;
1649 drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
1650 drop_opcode = (opcode & DROP_OPCODE_MASK);
1651 drop_instance = starting;
1652 /* the check to drop routine uses instance + count to know when
1653 * to end. Thus, if dropping 1 instance, count should be 0.
1654 * so subtract 1 from the count.
1655 */
1656 drop_amount = amount - 1;
1657
1658 pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
1659 "instances\n",
1660 __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
1661 drop_opcode, drop_amount);
1662
1663 return count;
1664 }
1665
1666
/* write-only (0200) control files exposed via the "ctl" device */
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
1674
1675 static struct attribute *fcloop_dev_attrs[] = {
1676 &dev_attr_add_local_port.attr,
1677 &dev_attr_del_local_port.attr,
1678 &dev_attr_add_remote_port.attr,
1679 &dev_attr_del_remote_port.attr,
1680 &dev_attr_add_target_port.attr,
1681 &dev_attr_del_target_port.attr,
1682 &dev_attr_set_cmd_drop.attr,
1683 NULL
1684 };
1685
1686 static const struct attribute_group fclopp_dev_attrs_group = {
1687 .attrs = fcloop_dev_attrs,
1688 };
1689
1690 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1691 &fclopp_dev_attrs_group,
1692 NULL,
1693 };
1694
/* the "fcloop" sysfs class and the single "ctl" device created under it */
static const struct class fcloop_class = {
	.name = "fcloop",
};
static struct device *fcloop_device;
1699
/*
 * Module init: create the LS request cache, register the "fcloop" class,
 * and create the "ctl" device that exposes the control attributes.
 */
static int __init fcloop_init(void)
{
	int ret;

	/* slab cache for LS request tracking structures */
	lsreq_cache = kmem_cache_create("lsreq_cache",
			sizeof(struct fcloop_lsreq), 0,
			0, NULL);
	if (!lsreq_cache)
		return -ENOMEM;

	ret = class_register(&fcloop_class);
	if (ret) {
		pr_err("couldn't register class fcloop\n");
		goto out_destroy_cache;
	}

	fcloop_device = device_create_with_groups(
				&fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	/* extra reference; dropped by put_device() in fcloop_exit() */
	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_unregister(&fcloop_class);
out_destroy_cache:
	kmem_cache_destroy(lsreq_cache);
	return ret;
}
1735
/*
 * Module exit: tear down every remaining nport (target and remote side),
 * then every lport, then the control device, class and slab cache.
 *
 * Both loops repeatedly take the first list entry, drop fcloop_lock while
 * calling the unregister routines (which may sleep/complete callbacks),
 * and re-take it before looking at the list again.
 */
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		/* stop when the list is empty or the head is already dying */
		if (!nport || !fcloop_nport_get(nport))
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		if (tport) {
			ret = __targetport_unreg(nport, tport);
			if (ret)
				pr_warn("%s: Failed deleting target port\n",
					__func__);
		}

		if (rport) {
			ret = __remoteport_unreg(nport, rport);
			if (ret)
				pr_warn("%s: Failed deleting remote port\n",
					__func__);
		}

		/* drop the reference taken by fcloop_nport_get() above */
		fcloop_nport_put(nport);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport || !fcloop_lport_get(lport))
			break;

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		fcloop_lport_put(lport);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* balances the get_device() in fcloop_init() */
	put_device(fcloop_device);

	device_destroy(&fcloop_class, MKDEV(0, 0));
	class_unregister(&fcloop_class);
	kmem_cache_destroy(lsreq_cache);
}
1802
/* standard module entry/exit and metadata */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
1808