/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

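/*
 * Parse a comma/newline-separated "key=value" option string into a
 * fcloop_ctrl_options structure, setting the corresponding NVMF_OPT_
 * bit in opts->mask for each option seen. Returns 0 on success,
 * -ENOMEM or -EINVAL on failure.
 */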
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


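/*
 * Parse only the wwnn/wwpn options from the buffer. Both must be
 * present; if either is missing the call fails with -EINVAL.
 */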
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

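/*
 * fcloop wires the NVMe/FC host and target transports back to back in
 * software: an lport is the emulated initiator-side local port, an
 * rport is the initiator's view of the remote (target) port, a tport
 * is the target-side port, and an nport ties the rport/tport pair for
 * one wwnn/wwpn identity together under a reference count.
 */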
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	struct work_struct		iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up the
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

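/*
 * Initiator-side LS request entry point: loop the request over to the
 * target side via nvmet_fc_rcv_ls_req(). If no target port is bound
 * to this remote port, complete the request asynchronously with
 * -ECONNREFUSED.
 */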
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

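/*
 * Target-side LS response transmit: copy the response payload into
 * the initiator's buffer (truncated to the smaller of the two
 * lengths), signal the target that the "transmit" is done, and
 * schedule the initiator completion.
 */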
static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP IO operation done due to an initiator abort.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP IO operation done via target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}


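/*
 * Initiator-side FCP command entry point: allocate a fcloop_fcpreq
 * that joins the initiator request to the target-side request, then
 * deliver the command to the target transport via
 * nvmet_fc_rcv_fcp_req().
 */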
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

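/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg), starting "offset" bytes
 * into io_sg. For NVMET_FCOP_WRITEDATA the copy is io_sg -> data_sg;
 * for read ops it is data_sg -> io_sg. This stands in for the data
 * transfer real hardware would do on the wire.
 */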
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

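/*
 * Target-side data-movement/response operation. Simulates the wire by
 * memcpy'ing between the two scatterlists, then completes the op back
 * to the target transport. If the initiator has already aborted the
 * I/O (fcpreq == NULL), act as if all went well but move no data.
 */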
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

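/*
 * sysfs "add_local_port" store: parse wwnn/wwpn (and optional
 * roles/fcaddr), register an NVMe/FC local port, and track it on
 * fcloop_lports.
 */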
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

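/*
 * Find or create the fcloop_nport for the given wwnn/wwpn. A remote
 * port (remoteport == true) must also name an existing local port via
 * lpwwnn/lpwwpn. An existing nport may be reused as long as the
 * requested side (rport or tport) is not already bound. Returns NULL
 * on any error.
 */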
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

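/*
 * Module unload: walk all nports, unregistering the target and remote
 * ports, then unregister each local port and wait for its delete
 * callback before proceeding. fcloop_lock is dropped around each
 * unregister call since those paths can block.
 */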
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");