xref: /linux/drivers/nvme/target/fcloop.c (revision a3d14d1602ca11429d242d230c31af8f822f614f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9 
10 #include "../host/nvme.h"
11 #include "../target/nvmet.h"
12 #include <linux/nvme-fc-driver.h>
13 #include <linux/nvme-fc.h>
14 
15 
16 enum {
17 	NVMF_OPT_ERR		= 0,
18 	NVMF_OPT_WWNN		= 1 << 0,
19 	NVMF_OPT_WWPN		= 1 << 1,
20 	NVMF_OPT_ROLES		= 1 << 2,
21 	NVMF_OPT_FCADDR		= 1 << 3,
22 	NVMF_OPT_LPWWNN		= 1 << 4,
23 	NVMF_OPT_LPWWPN		= 1 << 5,
24 };
25 
26 struct fcloop_ctrl_options {
27 	int			mask;
28 	u64			wwnn;
29 	u64			wwpn;
30 	u32			roles;
31 	u32			fcaddr;
32 	u64			lpwwnn;
33 	u64			lpwwpn;
34 };
35 
36 static const match_table_t opt_tokens = {
37 	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
38 	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
39 	{ NVMF_OPT_ROLES,	"roles=%d"	},
40 	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
41 	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
42 	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
43 	{ NVMF_OPT_ERR,		NULL		}
44 };
45 
46 static int fcloop_verify_addr(substring_t *s)
47 {
48 	size_t blen = s->to - s->from + 1;
49 
50 	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
51 	    strncmp(s->from, "0x", 2))
52 		return -EINVAL;
53 
54 	return 0;
55 }
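/*
 * For illustration: a well-formed name here is "0x" followed by exactly
 * 16 hex digits (NVME_FC_TRADDR_HEXNAMELEN), e.g. the hypothetical
 * "0x20000090fadd49a8". A value without the "0x" prefix, or of any
 * other length, fails with -EINVAL before match_u64() is even tried.
 */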
56 
57 static int
58 fcloop_parse_options(struct fcloop_ctrl_options *opts,
59 		const char *buf)
60 {
61 	substring_t args[MAX_OPT_ARGS];
62 	char *options, *o, *p;
63 	int token, ret = 0;
64 	u64 token64;
65 
66 	options = o = kstrdup(buf, GFP_KERNEL);
67 	if (!options)
68 		return -ENOMEM;
69 
70 	while ((p = strsep(&o, ",\n")) != NULL) {
71 		if (!*p)
72 			continue;
73 
74 		token = match_token(p, opt_tokens, args);
75 		opts->mask |= token;
76 		switch (token) {
77 		case NVMF_OPT_WWNN:
78 			if (fcloop_verify_addr(args) ||
79 			    match_u64(args, &token64)) {
80 				ret = -EINVAL;
81 				goto out_free_options;
82 			}
83 			opts->wwnn = token64;
84 			break;
85 		case NVMF_OPT_WWPN:
86 			if (fcloop_verify_addr(args) ||
87 			    match_u64(args, &token64)) {
88 				ret = -EINVAL;
89 				goto out_free_options;
90 			}
91 			opts->wwpn = token64;
92 			break;
93 		case NVMF_OPT_ROLES:
94 			if (match_int(args, &token)) {
95 				ret = -EINVAL;
96 				goto out_free_options;
97 			}
98 			opts->roles = token;
99 			break;
100 		case NVMF_OPT_FCADDR:
101 			if (match_hex(args, &token)) {
102 				ret = -EINVAL;
103 				goto out_free_options;
104 			}
105 			opts->fcaddr = token;
106 			break;
107 		case NVMF_OPT_LPWWNN:
108 			if (fcloop_verify_addr(args) ||
109 			    match_u64(args, &token64)) {
110 				ret = -EINVAL;
111 				goto out_free_options;
112 			}
113 			opts->lpwwnn = token64;
114 			break;
115 		case NVMF_OPT_LPWWPN:
116 			if (fcloop_verify_addr(args) ||
117 			    match_u64(args, &token64)) {
118 				ret = -EINVAL;
119 				goto out_free_options;
120 			}
121 			opts->lpwwpn = token64;
122 			break;
123 		default:
124 			pr_warn("unknown parameter or missing value '%s'\n", p);
125 			ret = -EINVAL;
126 			goto out_free_options;
127 		}
128 	}
129 
130 out_free_options:
131 	kfree(options);
132 	return ret;
133 }
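/*
 * Example input (hypothetical names) as written to the sysfs attributes
 * further below:
 *
 *   "wwnn=0x10000090fdd00001,wwpn=0x10000090fdd00002,roles=3,fcaddr=0x20"
 *
 * Each comma- or newline-separated token fills in one field and sets the
 * matching bit in opts->mask; any unknown token aborts the whole parse.
 */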
134 
135 
136 static int
137 fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
138 		const char *buf)
139 {
140 	substring_t args[MAX_OPT_ARGS];
141 	char *options, *o, *p;
142 	int token, ret = 0;
143 	u64 token64;
144 
145 	*nname = -1;
146 	*pname = -1;
147 
148 	options = o = kstrdup(buf, GFP_KERNEL);
149 	if (!options)
150 		return -ENOMEM;
151 
152 	while ((p = strsep(&o, ",\n")) != NULL) {
153 		if (!*p)
154 			continue;
155 
156 		token = match_token(p, opt_tokens, args);
157 		switch (token) {
158 		case NVMF_OPT_WWNN:
159 			if (fcloop_verify_addr(args) ||
160 			    match_u64(args, &token64)) {
161 				ret = -EINVAL;
162 				goto out_free_options;
163 			}
164 			*nname = token64;
165 			break;
166 		case NVMF_OPT_WWPN:
167 			if (fcloop_verify_addr(args) ||
168 			    match_u64(args, &token64)) {
169 				ret = -EINVAL;
170 				goto out_free_options;
171 			}
172 			*pname = token64;
173 			break;
174 		default:
175 			pr_warn("unknown parameter or missing value '%s'\n", p);
176 			ret = -EINVAL;
177 			goto out_free_options;
178 		}
179 	}
180 
181 out_free_options:
182 	kfree(options);
183 
184 	if (!ret) {
185 		if (*nname == -1)
186 			return -EINVAL;
187 		if (*pname == -1)
188 			return -EINVAL;
189 	}
190 
191 	return ret;
192 }
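/*
 * Used by the del_* attributes below; both names are mandatory, e.g.
 * "wwnn=0x10000090fdd00001,wwpn=0x10000090fdd00002" (hypothetical
 * values). If either token is missing, the -1 sentinel survives and
 * the function returns -EINVAL.
 */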
193 
194 
195 #define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
196 
197 #define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
198 			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
199 
200 #define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
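/*
 * A local port needs only wwnn/wwpn. A remote port additionally needs
 * lpwwnn/lpwwpn to name the already-registered local port it attaches
 * to (see fcloop_alloc_nport()). A target port again needs only
 * wwnn/wwpn, which must match the remote port it is to be paired with.
 */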
201 
202 
203 static DEFINE_SPINLOCK(fcloop_lock);
204 static LIST_HEAD(fcloop_lports);
205 static LIST_HEAD(fcloop_nports);
206 
207 struct fcloop_lport {
208 	struct nvme_fc_local_port *localport;
209 	struct list_head lport_list;
210 	refcount_t ref;
211 };
212 
213 struct fcloop_lport_priv {
214 	struct fcloop_lport *lport;
215 };
216 
217 /* The port is already being removed, avoid double free */
218 #define PORT_DELETED	0
219 
220 struct fcloop_rport {
221 	struct nvme_fc_remote_port	*remoteport;
222 	struct nvmet_fc_target_port	*targetport;
223 	struct fcloop_nport		*nport;
224 	struct fcloop_lport		*lport;
225 	spinlock_t			lock;
226 	struct list_head		ls_list;
227 	struct work_struct		ls_work;
228 	unsigned long			flags;
229 };
230 
231 struct fcloop_tport {
232 	struct nvmet_fc_target_port	*targetport;
233 	struct nvme_fc_remote_port	*remoteport;
234 	struct fcloop_nport		*nport;
235 	struct fcloop_lport		*lport;
236 	spinlock_t			lock;
237 	struct list_head		ls_list;
238 	struct work_struct		ls_work;
239 	unsigned long			flags;
240 };
241 
242 struct fcloop_nport {
243 	struct fcloop_rport *rport;
244 	struct fcloop_tport *tport;
245 	struct fcloop_lport *lport;
246 	struct list_head nport_list;
247 	refcount_t ref;
248 	u64 node_name;
249 	u64 port_name;
250 	u32 port_role;
251 	u32 port_id;
252 };
253 
254 struct fcloop_lsreq {
255 	struct nvmefc_ls_req		*lsreq;
256 	struct nvmefc_ls_rsp		ls_rsp;
257 	int				lsdir;	/* H2T or T2H */
258 	int				status;
259 	struct list_head		ls_list; /* fcloop_rport->ls_list */
260 };
261 
262 struct fcloop_rscn {
263 	struct fcloop_tport		*tport;
264 	struct work_struct		work;
265 };
266 
267 enum {
268 	INI_IO_START		= 0,
269 	INI_IO_ACTIVE		= 1,
270 	INI_IO_ABORTED		= 2,
271 	INI_IO_COMPLETED	= 3,
272 };
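/*
 * Initiator-side I/O state machine: START -> ACTIVE once the receive
 * work runs, START/ACTIVE -> ABORTED via fcloop_fcp_abort(), and any
 * state -> COMPLETED when fcloop_tgt_fcprqst_done_work() fires.
 */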
273 
274 struct fcloop_fcpreq {
275 	struct fcloop_tport		*tport;
276 	struct nvmefc_fcp_req		*fcpreq;
277 	spinlock_t			reqlock;
278 	u16				status;
279 	u32				inistate;
280 	bool				active;
281 	bool				aborted;
282 	refcount_t			ref;
283 	struct work_struct		fcp_rcv_work;
284 	struct work_struct		abort_rcv_work;
285 	struct work_struct		tio_done_work;
286 	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
287 };
288 
289 struct fcloop_ini_fcpreq {
290 	struct nvmefc_fcp_req		*fcpreq;
291 	struct fcloop_fcpreq		*tfcp_req;
292 	spinlock_t			inilock;
293 };
294 
295 /* SLAB cache for fcloop_lsreq structures */
296 static struct kmem_cache *lsreq_cache;
297 
298 static inline struct fcloop_lsreq *
299 ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
300 {
301 	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
302 }
303 
304 static inline struct fcloop_fcpreq *
305 tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
306 {
307 	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
308 }
309 
310 
311 static int
312 fcloop_create_queue(struct nvme_fc_local_port *localport,
313 			unsigned int qidx, u16 qsize,
314 			void **handle)
315 {
316 	*handle = localport;
317 	return 0;
318 }
319 
320 static void
321 fcloop_delete_queue(struct nvme_fc_local_port *localport,
322 			unsigned int idx, void *handle)
323 {
324 }
325 
326 static void
327 fcloop_rport_lsrqst_work(struct work_struct *work)
328 {
329 	struct fcloop_rport *rport =
330 		container_of(work, struct fcloop_rport, ls_work);
331 	struct fcloop_lsreq *tls_req;
332 
333 	spin_lock(&rport->lock);
334 	for (;;) {
335 		tls_req = list_first_entry_or_null(&rport->ls_list,
336 				struct fcloop_lsreq, ls_list);
337 		if (!tls_req)
338 			break;
339 
340 		list_del(&tls_req->ls_list);
341 		spin_unlock(&rport->lock);
342 
343 		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
344 		/*
345 		 * callee may free memory containing tls_req.
346 		 * do not reference lsreq after this.
347 		 */
348 		kmem_cache_free(lsreq_cache, tls_req);
349 
350 		spin_lock(&rport->lock);
351 	}
352 	spin_unlock(&rport->lock);
353 }
354 
355 static int
356 fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
357 			struct nvme_fc_remote_port *remoteport,
358 			struct nvmefc_ls_req *lsreq)
359 {
360 	struct fcloop_rport *rport = remoteport->private;
361 	struct fcloop_lsreq *tls_req;
362 	int ret = 0;
363 
364 	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
365 	if (!tls_req)
366 		return -ENOMEM;
367 	tls_req->lsreq = lsreq;
368 	INIT_LIST_HEAD(&tls_req->ls_list);
369 
370 	if (!rport->targetport) {
371 		tls_req->status = -ECONNREFUSED;
372 		spin_lock(&rport->lock);
373 		list_add_tail(&tls_req->ls_list, &rport->ls_list);
374 		spin_unlock(&rport->lock);
375 		queue_work(nvmet_wq, &rport->ls_work);
376 		return ret;
377 	}
378 
379 	tls_req->status = 0;
380 	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
381 				  &tls_req->ls_rsp,
382 				  lsreq->rqstaddr, lsreq->rqstlen);
383 
384 	return ret;
385 }
386 
387 static int
388 fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
389 			struct nvmefc_ls_rsp *lsrsp)
390 {
391 	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
392 	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
393 	struct fcloop_tport *tport = targetport->private;
394 	struct nvme_fc_remote_port *remoteport = tport->remoteport;
395 	struct fcloop_rport *rport;
396 
397 	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
398 		((lsreq->rsplen < lsrsp->rsplen) ?
399 				lsreq->rsplen : lsrsp->rsplen));
400 
401 	lsrsp->done(lsrsp);
402 
403 	if (!remoteport) {
404 		kmem_cache_free(lsreq_cache, tls_req);
405 		return 0;
406 	}
407 
408 	rport = remoteport->private;
409 	spin_lock(&rport->lock);
410 	list_add_tail(&tls_req->ls_list, &rport->ls_list);
411 	spin_unlock(&rport->lock);
412 	queue_work(nvmet_wq, &rport->ls_work);
413 
414 	return 0;
415 }
416 
417 static void
418 fcloop_tport_lsrqst_work(struct work_struct *work)
419 {
420 	struct fcloop_tport *tport =
421 		container_of(work, struct fcloop_tport, ls_work);
422 	struct fcloop_lsreq *tls_req;
423 
424 	spin_lock(&tport->lock);
425 	for (;;) {
426 		tls_req = list_first_entry_or_null(&tport->ls_list,
427 				struct fcloop_lsreq, ls_list);
428 		if (!tls_req)
429 			break;
430 
431 		list_del(&tls_req->ls_list);
432 		spin_unlock(&tport->lock);
433 
434 		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
435 		/*
436 		 * callee may free memory containing tls_req.
437 		 * do not reference lsreq after this.
438 		 */
439 		kmem_cache_free(lsreq_cache, tls_req);
440 
441 		spin_lock(&tport->lock);
442 	}
443 	spin_unlock(&tport->lock);
444 }
445 
446 static int
447 fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
448 			struct nvmefc_ls_req *lsreq)
449 {
450 	struct fcloop_tport *tport = targetport->private;
451 	struct fcloop_lsreq *tls_req;
452 	int ret = 0;
453 
454 	/*
455 	 * hosthandle should be the dst.rport value.
456 	 * hosthandle ignored as fcloop currently is
457 	 * 1:1 tgtport vs remoteport
458 	 */
459 
460 	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
461 	if (!tls_req)
462 		return -ENOMEM;
463 	tls_req->lsreq = lsreq;
464 	INIT_LIST_HEAD(&tls_req->ls_list);
465 
466 	if (!tport->remoteport) {
467 		tls_req->status = -ECONNREFUSED;
468 		spin_lock(&tport->lock);
469 		list_add_tail(&tls_req->ls_list, &tport->ls_list);
470 		spin_unlock(&tport->lock);
471 		queue_work(nvmet_wq, &tport->ls_work);
472 		return ret;
473 	}
474 
475 	tls_req->status = 0;
476 	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
477 				 lsreq->rqstaddr, lsreq->rqstlen);
478 
479 	if (ret)
480 		kmem_cache_free(lsreq_cache, tls_req);
481 
482 	return ret;
483 }
484 
485 static int
486 fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
487 			struct nvme_fc_remote_port *remoteport,
488 			struct nvmefc_ls_rsp *lsrsp)
489 {
490 	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
491 	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
492 	struct fcloop_rport *rport = remoteport->private;
493 	struct nvmet_fc_target_port *targetport = rport->targetport;
494 	struct fcloop_tport *tport;
495 
496 	if (!targetport) {
497 		/*
498 		 * The target port is gone. The target doesn't expect any
499 		 * response anymore and the ->done call is not valid
500 		 * because the resources have been freed by
501 		 * nvmet_fc_free_pending_reqs.
502 		 *
503 		 * We end up here from delete association exchange:
504 		 * nvmet_fc_xmt_disconnect_assoc sends an async request.
505 		 */
506 		kmem_cache_free(lsreq_cache, tls_req);
507 		return 0;
508 	}
509 
510 	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
511 		((lsreq->rsplen < lsrsp->rsplen) ?
512 				lsreq->rsplen : lsrsp->rsplen));
513 	lsrsp->done(lsrsp);
514 
515 	tport = targetport->private;
516 	spin_lock(&tport->lock);
517 	list_add_tail(&tls_req->ls_list, &tport->ls_list);
518 	spin_unlock(&tport->lock);
519 	queue_work(nvmet_wq, &tport->ls_work);
520 
521 	return 0;
522 }
523 
524 static void
525 fcloop_t2h_host_release(void *hosthandle)
526 {
527 	/* host handle ignored for now */
528 }
529 
530 static int
531 fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
532 {
533 	struct fcloop_rport *rport = hosthandle;
534 
535 	*wwnn = rport->lport->localport->node_name;
536 	*wwpn = rport->lport->localport->port_name;
537 	return 0;
538 }
539 
540 /*
541  * Simulate reception of an RSCN and convert it to an initiator transport
542  * call to rescan a remote port.
543  */
544 static void
545 fcloop_tgt_rscn_work(struct work_struct *work)
546 {
547 	struct fcloop_rscn *tgt_rscn =
548 		container_of(work, struct fcloop_rscn, work);
549 	struct fcloop_tport *tport = tgt_rscn->tport;
550 
551 	if (tport->remoteport)
552 		nvme_fc_rescan_remoteport(tport->remoteport);
553 	kfree(tgt_rscn);
554 }
555 
556 static void
557 fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
558 {
559 	struct fcloop_rscn *tgt_rscn;
560 
561 	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
562 	if (!tgt_rscn)
563 		return;
564 
565 	tgt_rscn->tport = tgtport->private;
566 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
567 
568 	queue_work(nvmet_wq, &tgt_rscn->work);
569 }
570 
571 static void
572 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
573 {
574 	if (!refcount_dec_and_test(&tfcp_req->ref))
575 		return;
576 
577 	kfree(tfcp_req);
578 }
579 
580 static int
581 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
582 {
583 	return refcount_inc_not_zero(&tfcp_req->ref);
584 }
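/*
 * refcount_inc_not_zero() fails once the count has already hit zero,
 * so a lookup racing with the final fcloop_tfcp_req_put() sees the
 * request as gone rather than resurrecting a freed object. The
 * lport/nport get/put helpers below follow the same pattern.
 */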
585 
586 static void
587 fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
588 			struct fcloop_fcpreq *tfcp_req, int status)
589 {
590 	struct fcloop_ini_fcpreq *inireq = NULL;
591 
592 	if (fcpreq) {
593 		inireq = fcpreq->private;
594 		spin_lock(&inireq->inilock);
595 		inireq->tfcp_req = NULL;
596 		spin_unlock(&inireq->inilock);
597 
598 		fcpreq->status = status;
599 		fcpreq->done(fcpreq);
600 	}
601 
602 	/* release original io reference on tgt struct */
603 	if (tfcp_req)
604 		fcloop_tfcp_req_put(tfcp_req);
605 }
606 
607 static bool drop_fabric_opcode;
608 #define DROP_OPCODE_MASK	0x00FF
609 /* fabrics opcode will have a bit set above 1st byte */
610 static int drop_opcode = -1;
611 static int drop_instance;
612 static int drop_amount;
613 static int drop_current_cnt;
614 
615 /*
616  * Routine to parse io and determine if the io is to be dropped.
617  * Returns:
618  *  0 if io is not obstructed
619  *  1 if io was dropped
620  */
621 static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
622 {
623 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
624 	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
625 	struct nvme_command *sqe = &cmdiu->sqe;
626 
627 	if (drop_opcode == -1)
628 		return 0;
629 
630 	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
631 		"inst %d start %d amt %d\n",
632 		__func__, sqe->common.opcode, sqe->fabrics.fctype,
633 		drop_fabric_opcode ? "y" : "n",
634 		drop_opcode, drop_current_cnt, drop_instance, drop_amount);
635 
636 	if ((drop_fabric_opcode &&
637 	     (sqe->common.opcode != nvme_fabrics_command ||
638 	      sqe->fabrics.fctype != drop_opcode)) ||
639 	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
640 		return 0;
641 
642 	if (++drop_current_cnt >= drop_instance) {
643 		if (drop_current_cnt >= drop_instance + drop_amount)
644 			drop_opcode = -1;
645 		return 1;
646 	}
647 
648 	return 0;
649 }
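/*
 * Worked example: "echo 0x2:5:3 > /sys/class/fcloop/ctl/set_cmd_drop"
 * (see fcloop_set_cmd_drop() below) arms drop_opcode = 0x2,
 * drop_instance = 5 and drop_amount = 2, so the 5th, 6th and 7th
 * matching commands are dropped, after which drop_opcode resets to -1
 * and the trigger disarms itself.
 */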
650 
651 static void
652 fcloop_fcp_recv_work(struct work_struct *work)
653 {
654 	struct fcloop_fcpreq *tfcp_req =
655 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
656 	struct nvmefc_fcp_req *fcpreq;
657 	unsigned long flags;
658 	int ret = 0;
659 	bool aborted = false;
660 
661 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
662 	fcpreq = tfcp_req->fcpreq;
663 	switch (tfcp_req->inistate) {
664 	case INI_IO_START:
665 		tfcp_req->inistate = INI_IO_ACTIVE;
666 		break;
667 	case INI_IO_ABORTED:
668 		aborted = true;
669 		break;
670 	default:
671 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
672 		WARN_ON(1);
673 		return;
674 	}
675 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
676 
677 	if (unlikely(aborted)) {
678 		/* the abort handler will call fcloop_call_host_done */
679 		return;
680 	}
681 
682 	if (unlikely(check_for_drop(tfcp_req))) {
683 		pr_info("%s: dropped command ********\n", __func__);
684 		return;
685 	}
686 
687 	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
688 				   &tfcp_req->tgt_fcp_req,
689 				   fcpreq->cmdaddr, fcpreq->cmdlen);
690 	if (ret)
691 		fcloop_call_host_done(fcpreq, tfcp_req, ret);
692 }
693 
694 static void
695 fcloop_fcp_abort_recv_work(struct work_struct *work)
696 {
697 	struct fcloop_fcpreq *tfcp_req =
698 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
699 	struct nvmefc_fcp_req *fcpreq;
700 	bool completed = false;
701 	unsigned long flags;
702 
703 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
704 	switch (tfcp_req->inistate) {
705 	case INI_IO_ABORTED:
706 		fcpreq = tfcp_req->fcpreq;
707 		tfcp_req->fcpreq = NULL;
708 		break;
709 	case INI_IO_COMPLETED:
710 		completed = true;
711 		break;
712 	default:
713 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
714 		fcloop_tfcp_req_put(tfcp_req);
715 		WARN_ON(1);
716 		return;
717 	}
718 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
719 
720 	if (unlikely(completed)) {
721 		/* remove reference taken in original abort downcall */
722 		fcloop_tfcp_req_put(tfcp_req);
723 		return;
724 	}
725 
726 	if (tfcp_req->tport->targetport)
727 		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
728 					&tfcp_req->tgt_fcp_req);
729 
730 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
731 	/* call_host_done releases reference for abort downcall */
732 }
733 
734 /*
735  * FCP IO operation done by target completion.
736  * call back up initiator "done" flows.
737  */
738 static void
739 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
740 {
741 	struct fcloop_fcpreq *tfcp_req =
742 		container_of(work, struct fcloop_fcpreq, tio_done_work);
743 	struct nvmefc_fcp_req *fcpreq;
744 	unsigned long flags;
745 
746 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
747 	fcpreq = tfcp_req->fcpreq;
748 	tfcp_req->inistate = INI_IO_COMPLETED;
749 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
750 
751 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
752 }
753 
754 
755 static int
756 fcloop_fcp_req(struct nvme_fc_local_port *localport,
757 			struct nvme_fc_remote_port *remoteport,
758 			void *hw_queue_handle,
759 			struct nvmefc_fcp_req *fcpreq)
760 {
761 	struct fcloop_rport *rport = remoteport->private;
762 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
763 	struct fcloop_fcpreq *tfcp_req;
764 
765 	if (!rport->targetport)
766 		return -ECONNREFUSED;
767 
768 	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
769 	if (!tfcp_req)
770 		return -ENOMEM;
771 
772 	inireq->fcpreq = fcpreq;
773 	inireq->tfcp_req = tfcp_req;
774 	spin_lock_init(&inireq->inilock);
775 
776 	tfcp_req->fcpreq = fcpreq;
777 	tfcp_req->tport = rport->targetport->private;
778 	tfcp_req->inistate = INI_IO_START;
779 	spin_lock_init(&tfcp_req->reqlock);
780 	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
781 	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
782 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
783 	refcount_set(&tfcp_req->ref, 1);
784 
785 	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
786 
787 	return 0;
788 }
789 
790 static void
791 fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
792 			struct scatterlist *io_sg, u32 offset, u32 length)
793 {
794 	void *data_p, *io_p;
795 	u32 data_len, io_len, tlen;
796 
797 	io_p = sg_virt(io_sg);
798 	io_len = io_sg->length;
799 
800 	for ( ; offset; ) {
801 		tlen = min_t(u32, offset, io_len);
802 		offset -= tlen;
803 		io_len -= tlen;
804 		if (!io_len) {
805 			io_sg = sg_next(io_sg);
806 			io_p = sg_virt(io_sg);
807 			io_len = io_sg->length;
808 		} else
809 			io_p += tlen;
810 	}
811 
812 	data_p = sg_virt(data_sg);
813 	data_len = data_sg->length;
814 
815 	for ( ; length; ) {
816 		tlen = min_t(u32, io_len, data_len);
817 		tlen = min_t(u32, tlen, length);
818 
819 		if (op == NVMET_FCOP_WRITEDATA)
820 			memcpy(data_p, io_p, tlen);
821 		else
822 			memcpy(io_p, data_p, tlen);
823 
824 		length -= tlen;
825 
826 		io_len -= tlen;
827 		if ((!io_len) && (length)) {
828 			io_sg = sg_next(io_sg);
829 			io_p = sg_virt(io_sg);
830 			io_len = io_sg->length;
831 		} else
832 			io_p += tlen;
833 
834 		data_len -= tlen;
835 		if ((!data_len) && (length)) {
836 			data_sg = sg_next(data_sg);
837 			data_p = sg_virt(data_sg);
838 			data_len = data_sg->length;
839 		} else
840 			data_p += tlen;
841 	}
842 }
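/*
 * E.g. a WRITEDATA op with offset 4096 and length 8192: the first loop
 * skips the initial 4096 bytes of the host's scatterlist, then the
 * second loop copies 8192 bytes host->target (target->host for reads),
 * advancing whichever scatterlist entry is exhausted first.
 */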
843 
844 static int
845 fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
846 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
847 {
848 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
849 	struct nvmefc_fcp_req *fcpreq;
850 	u32 rsplen = 0, xfrlen = 0;
851 	int fcp_err = 0, active, aborted;
852 	u8 op = tgt_fcpreq->op;
853 	unsigned long flags;
854 
855 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
856 	fcpreq = tfcp_req->fcpreq;
857 	active = tfcp_req->active;
858 	aborted = tfcp_req->aborted;
859 	tfcp_req->active = true;
860 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
861 
862 	if (unlikely(active))
863 		/* illegal - call while i/o active */
864 		return -EALREADY;
865 
866 	if (unlikely(aborted)) {
867 		/* target transport has aborted i/o prior */
868 		spin_lock_irqsave(&tfcp_req->reqlock, flags);
869 		tfcp_req->active = false;
870 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
871 		tgt_fcpreq->transferred_length = 0;
872 		tgt_fcpreq->fcp_error = -ECANCELED;
873 		tgt_fcpreq->done(tgt_fcpreq);
874 		return 0;
875 	}
876 
877 	/*
878 	 * if fcpreq is NULL, the I/O has been aborted (from
879 	 * initiator side). For the target side, act as if all is well
880 	 * but don't actually move data.
881 	 */
882 
883 	switch (op) {
884 	case NVMET_FCOP_WRITEDATA:
885 		xfrlen = tgt_fcpreq->transfer_length;
886 		if (fcpreq) {
887 			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
888 					fcpreq->first_sgl, tgt_fcpreq->offset,
889 					xfrlen);
890 			fcpreq->transferred_length += xfrlen;
891 		}
892 		break;
893 
894 	case NVMET_FCOP_READDATA:
895 	case NVMET_FCOP_READDATA_RSP:
896 		xfrlen = tgt_fcpreq->transfer_length;
897 		if (fcpreq) {
898 			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
899 					fcpreq->first_sgl, tgt_fcpreq->offset,
900 					xfrlen);
901 			fcpreq->transferred_length += xfrlen;
902 		}
903 		if (op == NVMET_FCOP_READDATA)
904 			break;
905 
906 		/* Fall-Thru to RSP handling */
907 		fallthrough;
908 
909 	case NVMET_FCOP_RSP:
910 		if (fcpreq) {
911 			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
912 					fcpreq->rsplen : tgt_fcpreq->rsplen);
913 			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
914 			if (rsplen < tgt_fcpreq->rsplen)
915 				fcp_err = -E2BIG;
916 			fcpreq->rcv_rsplen = rsplen;
917 			fcpreq->status = 0;
918 		}
919 		tfcp_req->status = 0;
920 		break;
921 
922 	default:
923 		fcp_err = -EINVAL;
924 		break;
925 	}
926 
927 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
928 	tfcp_req->active = false;
929 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
930 
931 	tgt_fcpreq->transferred_length = xfrlen;
932 	tgt_fcpreq->fcp_error = fcp_err;
933 	tgt_fcpreq->done(tgt_fcpreq);
934 
935 	return 0;
936 }
937 
938 static void
939 fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
940 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
941 {
942 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
943 	unsigned long flags;
944 
945 	/*
946 	 * mark aborted only in case there were 2 threads in transport
947 	 * (one doing io, other doing abort) and only kills ops posted
948 	 * after the abort request
949 	 */
950 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
951 	tfcp_req->aborted = true;
952 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
953 
954 	tfcp_req->status = NVME_SC_INTERNAL;
955 
956 	/*
957 	 * nothing more to do. If io wasn't active, the transport should
958 	 * immediately call the req_release. If it was active, the op
959 	 * will complete, and the lldd should call req_release.
960 	 */
961 }
962 
963 static void
964 fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
965 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
966 {
967 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
968 
969 	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
970 }
971 
972 static void
973 fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
974 			struct nvme_fc_remote_port *remoteport,
975 				struct nvmefc_ls_req *lsreq)
976 {
977 }
978 
979 static void
980 fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
981 			void *hosthandle, struct nvmefc_ls_req *lsreq)
982 {
983 }
984 
985 static void
986 fcloop_fcp_abort(struct nvme_fc_local_port *localport,
987 			struct nvme_fc_remote_port *remoteport,
988 			void *hw_queue_handle,
989 			struct nvmefc_fcp_req *fcpreq)
990 {
991 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
992 	struct fcloop_fcpreq *tfcp_req;
993 	bool abortio = true;
994 	unsigned long flags;
995 
996 	spin_lock(&inireq->inilock);
997 	tfcp_req = inireq->tfcp_req;
998 	if (tfcp_req) {
999 		if (!fcloop_tfcp_req_get(tfcp_req))
1000 			tfcp_req = NULL;
1001 	}
1002 	spin_unlock(&inireq->inilock);
1003 
1004 	if (!tfcp_req) {
1005 		/* abort has already been called */
1006 		goto out_host_done;
1007 	}
1008 
1009 	/* break initiator/target relationship for io */
1010 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
1011 	switch (tfcp_req->inistate) {
1012 	case INI_IO_START:
1013 	case INI_IO_ACTIVE:
1014 		tfcp_req->inistate = INI_IO_ABORTED;
1015 		break;
1016 	case INI_IO_COMPLETED:
1017 		abortio = false;
1018 		break;
1019 	default:
1020 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
1021 		WARN_ON(1);
1022 		goto out_host_done;
1023 	}
1024 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
1025 
1026 	if (abortio)
1027 		/* leave the reference while the work item is scheduled */
1028 		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
1029 	else  {
1030 		/*
1031 		 * as the io has already had the done callback made,
1032 		 * nothing more to do. So release the reference taken above
1033 		 */
1034 		fcloop_tfcp_req_put(tfcp_req);
1035 	}
1036 
1037 	return;
1038 
1039 out_host_done:
1040 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
1041 }
1042 
1043 static void
1044 fcloop_lport_put(struct fcloop_lport *lport)
1045 {
1046 	unsigned long flags;
1047 
1048 	if (!refcount_dec_and_test(&lport->ref))
1049 		return;
1050 
1051 	spin_lock_irqsave(&fcloop_lock, flags);
1052 	list_del(&lport->lport_list);
1053 	spin_unlock_irqrestore(&fcloop_lock, flags);
1054 
1055 	kfree(lport);
1056 }
1057 
1058 static int
1059 fcloop_lport_get(struct fcloop_lport *lport)
1060 {
1061 	return refcount_inc_not_zero(&lport->ref);
1062 }
1063 
1064 static void
1065 fcloop_nport_put(struct fcloop_nport *nport)
1066 {
1067 	unsigned long flags;
1068 
1069 	if (!refcount_dec_and_test(&nport->ref))
1070 		return;
1071 
1072 	spin_lock_irqsave(&fcloop_lock, flags);
1073 	list_del(&nport->nport_list);
1074 	spin_unlock_irqrestore(&fcloop_lock, flags);
1075 
1076 	if (nport->lport)
1077 		fcloop_lport_put(nport->lport);
1078 
1079 	kfree(nport);
1080 }
1081 
1082 static int
1083 fcloop_nport_get(struct fcloop_nport *nport)
1084 {
1085 	return refcount_inc_not_zero(&nport->ref);
1086 }
1087 
1088 static void
1089 fcloop_localport_delete(struct nvme_fc_local_port *localport)
1090 {
1091 	struct fcloop_lport_priv *lport_priv = localport->private;
1092 	struct fcloop_lport *lport = lport_priv->lport;
1093 
1094 	fcloop_lport_put(lport);
1095 }
1096 
1097 static void
1098 fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
1099 {
1100 	struct fcloop_rport *rport = remoteport->private;
1101 	bool put_port = false;
1102 	unsigned long flags;
1103 
1104 	flush_work(&rport->ls_work);
1105 
1106 	spin_lock_irqsave(&fcloop_lock, flags);
1107 	if (!test_and_set_bit(PORT_DELETED, &rport->flags))
1108 		put_port = true;
1109 	rport->nport->rport = NULL;
1110 	spin_unlock_irqrestore(&fcloop_lock, flags);
1111 
1112 	if (put_port)
1113 		fcloop_nport_put(rport->nport);
1114 }
1115 
1116 static void
1117 fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
1118 {
1119 	struct fcloop_tport *tport = targetport->private;
1120 	bool put_port = false;
1121 	unsigned long flags;
1122 
1123 	flush_work(&tport->ls_work);
1124 
1125 	spin_lock_irqsave(&fcloop_lock, flags);
1126 	if (!test_and_set_bit(PORT_DELETED, &tport->flags))
1127 		put_port = true;
1128 	tport->nport->tport = NULL;
1129 	spin_unlock_irqrestore(&fcloop_lock, flags);
1130 
1131 	if (put_port)
1132 		fcloop_nport_put(tport->nport);
1133 }
1134 
1135 #define	FCLOOP_HW_QUEUES		4
1136 #define	FCLOOP_SGL_SEGS			256
1137 #define FCLOOP_DMABOUND_4G		0xFFFFFFFF
1138 
1139 static struct nvme_fc_port_template fctemplate = {
1140 	.localport_delete	= fcloop_localport_delete,
1141 	.remoteport_delete	= fcloop_remoteport_delete,
1142 	.create_queue		= fcloop_create_queue,
1143 	.delete_queue		= fcloop_delete_queue,
1144 	.ls_req			= fcloop_h2t_ls_req,
1145 	.fcp_io			= fcloop_fcp_req,
1146 	.ls_abort		= fcloop_h2t_ls_abort,
1147 	.fcp_abort		= fcloop_fcp_abort,
1148 	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
1149 	.max_hw_queues		= FCLOOP_HW_QUEUES,
1150 	.max_sgl_segments	= FCLOOP_SGL_SEGS,
1151 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
1152 	.dma_boundary		= FCLOOP_DMABOUND_4G,
1153 	/* sizes of additional private data for data structures */
1154 	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
1155 	.remote_priv_sz		= sizeof(struct fcloop_rport),
1156 	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
1157 };
1158 
1159 static struct nvmet_fc_target_template tgttemplate = {
1160 	.targetport_delete	= fcloop_targetport_delete,
1161 	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
1162 	.fcp_op			= fcloop_fcp_op,
1163 	.fcp_abort		= fcloop_tgt_fcp_abort,
1164 	.fcp_req_release	= fcloop_fcp_req_release,
1165 	.discovery_event	= fcloop_tgt_discovery_evt,
1166 	.ls_req			= fcloop_t2h_ls_req,
1167 	.ls_abort		= fcloop_t2h_ls_abort,
1168 	.host_release		= fcloop_t2h_host_release,
1169 	.host_traddr		= fcloop_t2h_host_traddr,
1170 	.max_hw_queues		= FCLOOP_HW_QUEUES,
1171 	.max_sgl_segments	= FCLOOP_SGL_SEGS,
1172 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
1173 	.dma_boundary		= FCLOOP_DMABOUND_4G,
1174 	/* optional features */
1175 	.target_features	= 0,
1176 	/* sizes of additional private data for data structures */
1177 	.target_priv_sz		= sizeof(struct fcloop_tport),
1178 };
1179 
1180 static ssize_t
1181 fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
1182 		const char *buf, size_t count)
1183 {
1184 	struct nvme_fc_port_info pinfo;
1185 	struct fcloop_ctrl_options *opts;
1186 	struct nvme_fc_local_port *localport;
1187 	struct fcloop_lport *lport;
1188 	struct fcloop_lport_priv *lport_priv;
1189 	unsigned long flags;
1190 	int ret = -ENOMEM;
1191 
1192 	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
1193 	if (!lport)
1194 		return -ENOMEM;
1195 
1196 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1197 	if (!opts)
1198 		goto out_free_lport;
1199 
1200 	ret = fcloop_parse_options(opts, buf);
1201 	if (ret)
1202 		goto out_free_opts;
1203 
1204 	/* everything there ? */
1205 	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
1206 		ret = -EINVAL;
1207 		goto out_free_opts;
1208 	}
1209 
1210 	memset(&pinfo, 0, sizeof(pinfo));
1211 	pinfo.node_name = opts->wwnn;
1212 	pinfo.port_name = opts->wwpn;
1213 	pinfo.port_role = opts->roles;
1214 	pinfo.port_id = opts->fcaddr;
1215 
1216 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
1217 	if (!ret) {
1218 		/* success */
1219 		lport_priv = localport->private;
1220 		lport_priv->lport = lport;
1221 
1222 		lport->localport = localport;
1223 		INIT_LIST_HEAD(&lport->lport_list);
1224 		refcount_set(&lport->ref, 1);
1225 
1226 		spin_lock_irqsave(&fcloop_lock, flags);
1227 		list_add_tail(&lport->lport_list, &fcloop_lports);
1228 		spin_unlock_irqrestore(&fcloop_lock, flags);
1229 	}
1230 
1231 out_free_opts:
1232 	kfree(opts);
1233 out_free_lport:
1234 	/* free only if we're going to fail */
1235 	if (ret)
1236 		kfree(lport);
1237 
1238 	return ret ? ret : count;
1239 }
1240 
1241 static int
1242 __localport_unreg(struct fcloop_lport *lport)
1243 {
1244 	return nvme_fc_unregister_localport(lport->localport);
1245 }
1246 
1247 static struct fcloop_nport *
1248 __fcloop_nport_lookup(u64 node_name, u64 port_name)
1249 {
1250 	struct fcloop_nport *nport;
1251 
1252 	list_for_each_entry(nport, &fcloop_nports, nport_list) {
1253 		if (nport->node_name != node_name ||
1254 		    nport->port_name != port_name)
1255 			continue;
1256 
1257 		if (fcloop_nport_get(nport))
1258 			return nport;
1259 
1260 		break;
1261 	}
1262 
1263 	return NULL;
1264 }
1265 
1266 static struct fcloop_nport *
1267 fcloop_nport_lookup(u64 node_name, u64 port_name)
1268 {
1269 	struct fcloop_nport *nport;
1270 	unsigned long flags;
1271 
1272 	spin_lock_irqsave(&fcloop_lock, flags);
1273 	nport = __fcloop_nport_lookup(node_name, port_name);
1274 	spin_unlock_irqrestore(&fcloop_lock, flags);
1275 
1276 	return nport;
1277 }
1278 
1279 static struct fcloop_lport *
1280 __fcloop_lport_lookup(u64 node_name, u64 port_name)
1281 {
1282 	struct fcloop_lport *lport;
1283 
1284 	list_for_each_entry(lport, &fcloop_lports, lport_list) {
1285 		if (lport->localport->node_name != node_name ||
1286 		    lport->localport->port_name != port_name)
1287 			continue;
1288 
1289 		if (fcloop_lport_get(lport))
1290 			return lport;
1291 
1292 		break;
1293 	}
1294 
1295 	return NULL;
1296 }
1297 
1298 static struct fcloop_lport *
1299 fcloop_lport_lookup(u64 node_name, u64 port_name)
1300 {
1301 	struct fcloop_lport *lport;
1302 	unsigned long flags;
1303 
1304 	spin_lock_irqsave(&fcloop_lock, flags);
1305 	lport = __fcloop_lport_lookup(node_name, port_name);
1306 	spin_unlock_irqrestore(&fcloop_lock, flags);
1307 
1308 	return lport;
1309 }
1310 
1311 static ssize_t
1312 fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
1313 		const char *buf, size_t count)
1314 {
1315 	struct fcloop_lport *lport;
1316 	u64 nodename, portname;
1317 	int ret;
1318 
1319 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1320 	if (ret)
1321 		return ret;
1322 
1323 	lport = fcloop_lport_lookup(nodename, portname);
1324 	if (!lport)
1325 		return -ENOENT;
1326 
1327 	ret = __localport_unreg(lport);
1328 	fcloop_lport_put(lport);
1329 
1330 	return ret ? ret : count;
1331 }
1332 
1333 static struct fcloop_nport *
1334 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
1335 {
1336 	struct fcloop_nport *newnport, *nport;
1337 	struct fcloop_lport *lport;
1338 	struct fcloop_ctrl_options *opts;
1339 	unsigned long flags;
1340 	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
1341 	int ret;
1342 
1343 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1344 	if (!opts)
1345 		return NULL;
1346 
1347 	ret = fcloop_parse_options(opts, buf);
1348 	if (ret)
1349 		goto out_free_opts;
1350 
1351 	/* everything there ? */
1352 	if ((opts->mask & opts_mask) != opts_mask)
1353 		goto out_free_opts;
1354 
1355 	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
1356 	if (!newnport)
1357 		goto out_free_opts;
1358 
1359 	INIT_LIST_HEAD(&newnport->nport_list);
1360 	newnport->node_name = opts->wwnn;
1361 	newnport->port_name = opts->wwpn;
1362 	if (opts->mask & NVMF_OPT_ROLES)
1363 		newnport->port_role = opts->roles;
1364 	if (opts->mask & NVMF_OPT_FCADDR)
1365 		newnport->port_id = opts->fcaddr;
1366 	refcount_set(&newnport->ref, 1);
1367 
1368 	spin_lock_irqsave(&fcloop_lock, flags);
1369 	lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
1370 	if (lport) {
1371 		/* invalid configuration */
1372 		fcloop_lport_put(lport);
1373 		goto out_free_newnport;
1374 	}
1375 
1376 	if (remoteport) {
1377 		lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
1378 		if (!lport) {
1379 			/* invalid configuration */
1380 			goto out_free_newnport;
1381 		}
1382 	}
1383 
1384 	nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
1385 	if (nport) {
1386 		if ((remoteport && nport->rport) ||
1387 		    (!remoteport && nport->tport)) {
1388 			/* invalid configuration */
1389 			goto out_put_nport;
1390 		}
1391 
1392 		/* found existing nport, discard the new nport */
1393 		kfree(newnport);
1394 	} else {
1395 		list_add_tail(&newnport->nport_list, &fcloop_nports);
1396 		nport = newnport;
1397 	}
1398 
1399 	if (opts->mask & NVMF_OPT_ROLES)
1400 		nport->port_role = opts->roles;
1401 	if (opts->mask & NVMF_OPT_FCADDR)
1402 		nport->port_id = opts->fcaddr;
1403 	if (lport) {
1404 		if (!nport->lport)
1405 			nport->lport = lport;
1406 		else
1407 			fcloop_lport_put(lport);
1408 	}
1409 	spin_unlock_irqrestore(&fcloop_lock, flags);
1410 
1411 	kfree(opts);
1412 	return nport;
1413 
1414 out_put_nport:
1415 	if (lport)
1416 		fcloop_lport_put(lport);
1417 	fcloop_nport_put(nport);
1418 out_free_newnport:
1419 	spin_unlock_irqrestore(&fcloop_lock, flags);
1420 	kfree(newnport);
1421 out_free_opts:
1422 	kfree(opts);
1423 	return NULL;
1424 }
1425 
1426 static ssize_t
1427 fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
1428 		const char *buf, size_t count)
1429 {
1430 	struct nvme_fc_remote_port *remoteport;
1431 	struct fcloop_nport *nport;
1432 	struct fcloop_rport *rport;
1433 	struct nvme_fc_port_info pinfo;
1434 	int ret;
1435 
1436 	nport = fcloop_alloc_nport(buf, count, true);
1437 	if (!nport)
1438 		return -EIO;
1439 
1440 	memset(&pinfo, 0, sizeof(pinfo));
1441 	pinfo.node_name = nport->node_name;
1442 	pinfo.port_name = nport->port_name;
1443 	pinfo.port_role = nport->port_role;
1444 	pinfo.port_id = nport->port_id;
1445 
1446 	ret = nvme_fc_register_remoteport(nport->lport->localport,
1447 						&pinfo, &remoteport);
1448 	if (ret || !remoteport) {
1449 		fcloop_nport_put(nport);
1450 		return ret;
1451 	}
1452 
1453 	/* success */
1454 	rport = remoteport->private;
1455 	rport->remoteport = remoteport;
1456 	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
1457 	if (nport->tport) {
1458 		nport->tport->remoteport = remoteport;
1459 		nport->tport->lport = nport->lport;
1460 	}
1461 	rport->nport = nport;
1462 	rport->lport = nport->lport;
1463 	nport->rport = rport;
1464 	rport->flags = 0;
1465 	spin_lock_init(&rport->lock);
1466 	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
1467 	INIT_LIST_HEAD(&rport->ls_list);
1468 
1469 	return count;
1470 }
1471 
1472 
1473 static struct fcloop_rport *
1474 __unlink_remote_port(struct fcloop_nport *nport)
1475 {
1476 	struct fcloop_rport *rport = nport->rport;
1477 
1478 	lockdep_assert_held(&fcloop_lock);
1479 
1480 	if (rport && nport->tport)
1481 		nport->tport->remoteport = NULL;
1482 	nport->rport = NULL;
1483 
1484 	return rport;
1485 }
1486 
1487 static int
1488 __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1489 {
1490 	return nvme_fc_unregister_remoteport(rport->remoteport);
1491 }
1492 
1493 static ssize_t
1494 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1495 		const char *buf, size_t count)
1496 {
1497 	struct fcloop_nport *nport;
1498 	struct fcloop_rport *rport;
1499 	u64 nodename, portname;
1500 	unsigned long flags;
1501 	int ret;
1502 
1503 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1504 	if (ret)
1505 		return ret;
1506 
1507 	nport = fcloop_nport_lookup(nodename, portname);
1508 	if (!nport)
1509 		return -ENOENT;
1510 
1511 	spin_lock_irqsave(&fcloop_lock, flags);
1512 	rport = __unlink_remote_port(nport);
1513 	spin_unlock_irqrestore(&fcloop_lock, flags);
1514 
1515 	if (!rport) {
1516 		ret = -ENOENT;
1517 		goto out_nport_put;
1518 	}
1519 
1520 	ret = __remoteport_unreg(nport, rport);
1521 
1522 out_nport_put:
1523 	fcloop_nport_put(nport);
1524 
1525 	return ret ? ret : count;
1526 }
1527 
1528 static ssize_t
1529 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1530 		const char *buf, size_t count)
1531 {
1532 	struct nvmet_fc_target_port *targetport;
1533 	struct fcloop_nport *nport;
1534 	struct fcloop_tport *tport;
1535 	struct nvmet_fc_port_info tinfo;
1536 	int ret;
1537 
1538 	nport = fcloop_alloc_nport(buf, count, false);
1539 	if (!nport)
1540 		return -EIO;
1541 
1542 	tinfo.node_name = nport->node_name;
1543 	tinfo.port_name = nport->port_name;
1544 	tinfo.port_id = nport->port_id;
1545 
1546 	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1547 						&targetport);
1548 	if (ret) {
1549 		fcloop_nport_put(nport);
1550 		return ret;
1551 	}
1552 
1553 	/* success */
1554 	tport = targetport->private;
1555 	tport->targetport = targetport;
1556 	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
1557 	if (nport->rport)
1558 		nport->rport->targetport = targetport;
1559 	tport->nport = nport;
1560 	tport->lport = nport->lport;
1561 	nport->tport = tport;
1562 	tport->flags = 0;
1563 	spin_lock_init(&tport->lock);
1564 	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
1565 	INIT_LIST_HEAD(&tport->ls_list);
1566 
1567 	return count;
1568 }
1569 
1570 
1571 static struct fcloop_tport *
1572 __unlink_target_port(struct fcloop_nport *nport)
1573 {
1574 	struct fcloop_tport *tport = nport->tport;
1575 
1576 	lockdep_assert_held(&fcloop_lock);
1577 
1578 	if (tport && nport->rport)
1579 		nport->rport->targetport = NULL;
1580 	nport->tport = NULL;
1581 
1582 	return tport;
1583 }
1584 
1585 static int
1586 __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1587 {
1588 	return nvmet_fc_unregister_targetport(tport->targetport);
1589 }
1590 
1591 static ssize_t
1592 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1593 		const char *buf, size_t count)
1594 {
1595 	struct fcloop_nport *nport;
1596 	struct fcloop_tport *tport;
1597 	u64 nodename, portname;
1598 	unsigned long flags;
1599 	int ret;
1600 
1601 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1602 	if (ret)
1603 		return ret;
1604 
1605 	nport = fcloop_nport_lookup(nodename, portname);
1606 	if (!nport)
1607 		return -ENOENT;
1608 
1609 	spin_lock_irqsave(&fcloop_lock, flags);
1610 	tport = __unlink_target_port(nport);
1611 	spin_unlock_irqrestore(&fcloop_lock, flags);
1612 
1613 	if (!tport) {
1614 		ret = -ENOENT;
1615 		goto out_nport_put;
1616 	}
1617 
1618 	ret = __targetport_unreg(nport, tport);
1619 
1620 out_nport_put:
1621 	fcloop_nport_put(nport);
1622 
1623 	return ret ? ret : count;
1624 }
1625 
1626 static ssize_t
1627 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
1628 		const char *buf, size_t count)
1629 {
1630 	unsigned int opcode;
1631 	int starting, amount;
1632 
1633 	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
1634 		return -EBADRQC;
1635 
1636 	drop_current_cnt = 0;
1637 	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
1638 	drop_opcode = (opcode & DROP_OPCODE_MASK);
1639 	drop_instance = starting;
1640 	/* the check to drop routine uses instance + count to know when
1641 	 * to end. Thus, if dropping 1 instance, count should be 0.
1642 	 * so subtract 1 from the count.
1643 	 */
1644 	drop_amount = amount - 1;
1645 
1646 	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
1647 		"instances\n",
1648 		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
1649 		drop_opcode, drop_amount);
1650 
1651 	return count;
1652 }
1653 
1654 
1655 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1656 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1657 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1658 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1659 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1660 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1661 static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
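/*
 * These attributes appear under /sys/class/fcloop/ctl/. A minimal
 * loopback topology could be built with (hypothetical names; roles=
 * and fcaddr= tokens may be appended as needed):
 *
 *   echo "wwnn=0x1000000000000001,wwpn=0x1000000000000002" > add_local_port
 *   echo "wwnn=0x1000000000000003,wwpn=0x1000000000000004" > add_target_port
 *   echo "wwnn=0x1000000000000003,wwpn=0x1000000000000004,lpwwnn=0x1000000000000001,lpwwpn=0x1000000000000002" > add_remote_port
 */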
1662 
1663 static struct attribute *fcloop_dev_attrs[] = {
1664 	&dev_attr_add_local_port.attr,
1665 	&dev_attr_del_local_port.attr,
1666 	&dev_attr_add_remote_port.attr,
1667 	&dev_attr_del_remote_port.attr,
1668 	&dev_attr_add_target_port.attr,
1669 	&dev_attr_del_target_port.attr,
1670 	&dev_attr_set_cmd_drop.attr,
1671 	NULL
1672 };
1673 
1674 static const struct attribute_group fcloop_dev_attrs_group = {
1675 	.attrs		= fcloop_dev_attrs,
1676 };
1677 
1678 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1679 	&fcloop_dev_attrs_group,
1680 	NULL,
1681 };
1682 
1683 static const struct class fcloop_class = {
1684 	.name = "fcloop",
1685 };
1686 static struct device *fcloop_device;
1687 
1688 static int __init fcloop_init(void)
1689 {
1690 	int ret;
1691 
1692 	lsreq_cache = kmem_cache_create("lsreq_cache",
1693 				sizeof(struct fcloop_lsreq), 0,
1694 				0, NULL);
1695 	if (!lsreq_cache)
1696 		return -ENOMEM;
1697 
1698 	ret = class_register(&fcloop_class);
1699 	if (ret) {
1700 		pr_err("couldn't register class fcloop\n");
1701 		goto out_destroy_cache;
1702 	}
1703 
1704 	fcloop_device = device_create_with_groups(
1705 				&fcloop_class, NULL, MKDEV(0, 0), NULL,
1706 				fcloop_dev_attr_groups, "ctl");
1707 	if (IS_ERR(fcloop_device)) {
1708 		pr_err("couldn't create ctl device!\n");
1709 		ret = PTR_ERR(fcloop_device);
1710 		goto out_destroy_class;
1711 	}
1712 
1713 	get_device(fcloop_device);
1714 
1715 	return 0;
1716 
1717 out_destroy_class:
1718 	class_unregister(&fcloop_class);
1719 out_destroy_cache:
1720 	kmem_cache_destroy(lsreq_cache);
1721 	return ret;
1722 }
1723 
1724 static void __exit fcloop_exit(void)
1725 {
1726 	struct fcloop_lport *lport;
1727 	struct fcloop_nport *nport;
1728 	struct fcloop_tport *tport;
1729 	struct fcloop_rport *rport;
1730 	unsigned long flags;
1731 	int ret;
1732 
1733 	spin_lock_irqsave(&fcloop_lock, flags);
1734 
1735 	for (;;) {
1736 		nport = list_first_entry_or_null(&fcloop_nports,
1737 						typeof(*nport), nport_list);
1738 		if (!nport || !fcloop_nport_get(nport))
1739 			break;
1740 
1741 		tport = __unlink_target_port(nport);
1742 		rport = __unlink_remote_port(nport);
1743 
1744 		spin_unlock_irqrestore(&fcloop_lock, flags);
1745 
1746 		if (tport) {
1747 			ret = __targetport_unreg(nport, tport);
1748 			if (ret)
1749 				pr_warn("%s: Failed deleting target port\n",
1750 					__func__);
1751 		}
1752 
1753 		if (rport) {
1754 			ret = __remoteport_unreg(nport, rport);
1755 			if (ret)
1756 				pr_warn("%s: Failed deleting remote port\n",
1757 					__func__);
1758 		}
1759 
1760 		fcloop_nport_put(nport);
1761 
1762 		spin_lock_irqsave(&fcloop_lock, flags);
1763 	}
1764 
1765 	for (;;) {
1766 		lport = list_first_entry_or_null(&fcloop_lports,
1767 						typeof(*lport), lport_list);
1768 		if (!lport || !fcloop_lport_get(lport))
1769 			break;
1770 
1771 		spin_unlock_irqrestore(&fcloop_lock, flags);
1772 
1773 		ret = __localport_unreg(lport);
1774 		if (ret)
1775 			pr_warn("%s: Failed deleting local port\n", __func__);
1776 
1777 		fcloop_lport_put(lport);
1778 
1779 		spin_lock_irqsave(&fcloop_lock, flags);
1780 	}
1781 
1782 	spin_unlock_irqrestore(&fcloop_lock, flags);
1783 
1784 	put_device(fcloop_device);
1785 
1786 	device_destroy(&fcloop_class, MKDEV(0, 0));
1787 	class_unregister(&fcloop_class);
1788 	kmem_cache_destroy(lsreq_cache);
1789 }
1790 
1791 module_init(fcloop_init);
1792 module_exit(fcloop_exit);
1793 
1794 MODULE_DESCRIPTION("NVMe target FC loop transport driver");
1795 MODULE_LICENSE("GPL v2");
1796