xref: /linux/drivers/nvme/target/fcloop.c (revision 4482ebb2970efa58173075c101426b2f3af40b41)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
4  */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9 
10 #include "../host/nvme.h"
11 #include "../target/nvmet.h"
12 #include <linux/nvme-fc-driver.h>
13 #include <linux/nvme-fc.h>
14 
15 
16 enum {
17 	NVMF_OPT_ERR		= 0,
18 	NVMF_OPT_WWNN		= 1 << 0,
19 	NVMF_OPT_WWPN		= 1 << 1,
20 	NVMF_OPT_ROLES		= 1 << 2,
21 	NVMF_OPT_FCADDR		= 1 << 3,
22 	NVMF_OPT_LPWWNN		= 1 << 4,
23 	NVMF_OPT_LPWWPN		= 1 << 5,
24 };
25 
26 struct fcloop_ctrl_options {
27 	int			mask;
28 	u64			wwnn;
29 	u64			wwpn;
30 	u32			roles;
31 	u32			fcaddr;
32 	u64			lpwwnn;
33 	u64			lpwwpn;
34 };
35 
36 static const match_table_t opt_tokens = {
37 	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
38 	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
39 	{ NVMF_OPT_ROLES,	"roles=%d"	},
40 	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
41 	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
42 	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
43 	{ NVMF_OPT_ERR,		NULL		}
44 };
45 
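/*
 * Illustrative usage (example WWN values, not from this file): the option
 * strings parsed below arrive via writes to the fcloop sysfs control
 * files, e.g.
 *
 *   echo "wwnn=0x20000090fadd3a58,wwpn=0x10000090fadd3a58" > \
 *       /sys/class/fcloop/ctl/add_local_port
 *
 * fcloop_verify_addr() requires each name to be "0x" followed by
 * NVME_FC_TRADDR_HEXNAMELEN hex digits.
 */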
46 static int fcloop_verify_addr(substring_t *s)
47 {
48 	size_t blen = s->to - s->from + 1;
49 
50 	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
51 	    strncmp(s->from, "0x", 2))
52 		return -EINVAL;
53 
54 	return 0;
55 }
56 
57 static int
58 fcloop_parse_options(struct fcloop_ctrl_options *opts,
59 		const char *buf)
60 {
61 	substring_t args[MAX_OPT_ARGS];
62 	char *options, *o, *p;
63 	int token, ret = 0;
64 	u64 token64;
65 
66 	options = o = kstrdup(buf, GFP_KERNEL);
67 	if (!options)
68 		return -ENOMEM;
69 
70 	while ((p = strsep(&o, ",\n")) != NULL) {
71 		if (!*p)
72 			continue;
73 
74 		token = match_token(p, opt_tokens, args);
75 		opts->mask |= token;
76 		switch (token) {
77 		case NVMF_OPT_WWNN:
78 			if (fcloop_verify_addr(args) ||
79 			    match_u64(args, &token64)) {
80 				ret = -EINVAL;
81 				goto out_free_options;
82 			}
83 			opts->wwnn = token64;
84 			break;
85 		case NVMF_OPT_WWPN:
86 			if (fcloop_verify_addr(args) ||
87 			    match_u64(args, &token64)) {
88 				ret = -EINVAL;
89 				goto out_free_options;
90 			}
91 			opts->wwpn = token64;
92 			break;
93 		case NVMF_OPT_ROLES:
94 			if (match_int(args, &token)) {
95 				ret = -EINVAL;
96 				goto out_free_options;
97 			}
98 			opts->roles = token;
99 			break;
100 		case NVMF_OPT_FCADDR:
101 			if (match_hex(args, &token)) {
102 				ret = -EINVAL;
103 				goto out_free_options;
104 			}
105 			opts->fcaddr = token;
106 			break;
107 		case NVMF_OPT_LPWWNN:
108 			if (fcloop_verify_addr(args) ||
109 			    match_u64(args, &token64)) {
110 				ret = -EINVAL;
111 				goto out_free_options;
112 			}
113 			opts->lpwwnn = token64;
114 			break;
115 		case NVMF_OPT_LPWWPN:
116 			if (fcloop_verify_addr(args) ||
117 			    match_u64(args, &token64)) {
118 				ret = -EINVAL;
119 				goto out_free_options;
120 			}
121 			opts->lpwwpn = token64;
122 			break;
123 		default:
124 			pr_warn("unknown parameter or missing value '%s'\n", p);
125 			ret = -EINVAL;
126 			goto out_free_options;
127 		}
128 	}
129 
130 out_free_options:
131 	kfree(options);
132 	return ret;
133 }
134 
135 
136 static int
137 fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
138 		const char *buf)
139 {
140 	substring_t args[MAX_OPT_ARGS];
141 	char *options, *o, *p;
142 	int token, ret = 0;
143 	u64 token64;
144 
145 	*nname = -1;
146 	*pname = -1;
147 
148 	options = o = kstrdup(buf, GFP_KERNEL);
149 	if (!options)
150 		return -ENOMEM;
151 
152 	while ((p = strsep(&o, ",\n")) != NULL) {
153 		if (!*p)
154 			continue;
155 
156 		token = match_token(p, opt_tokens, args);
157 		switch (token) {
158 		case NVMF_OPT_WWNN:
159 			if (fcloop_verify_addr(args) ||
160 			    match_u64(args, &token64)) {
161 				ret = -EINVAL;
162 				goto out_free_options;
163 			}
164 			*nname = token64;
165 			break;
166 		case NVMF_OPT_WWPN:
167 			if (fcloop_verify_addr(args) ||
168 			    match_u64(args, &token64)) {
169 				ret = -EINVAL;
170 				goto out_free_options;
171 			}
172 			*pname = token64;
173 			break;
174 		default:
175 			pr_warn("unknown parameter or missing value '%s'\n", p);
176 			ret = -EINVAL;
177 			goto out_free_options;
178 		}
179 	}
180 
181 out_free_options:
182 	kfree(options);
183 
184 	if (!ret) {
185 		if (*nname == -1)
186 			return -EINVAL;
187 		if (*pname == -1)
188 			return -EINVAL;
189 	}
190 
191 	return ret;
192 }
193 
194 
195 #define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
196 
197 #define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
198 			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
199 
200 #define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
201 
202 
203 static DEFINE_SPINLOCK(fcloop_lock);
204 static LIST_HEAD(fcloop_lports);
205 static LIST_HEAD(fcloop_nports);
206 
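/*
 * Object model: an fcloop_lport wraps a registered NVMe FC local port. An
 * fcloop_nport describes the looped-back fabric endpoint and may carry
 * both an rport (the host's view of the remote port) and a tport (the
 * target port), cross-linked so that traffic loops back in software.
 */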
207 struct fcloop_lport {
208 	struct nvme_fc_local_port *localport;
209 	struct list_head lport_list;
210 	refcount_t ref;
211 };
212 
213 struct fcloop_lport_priv {
214 	struct fcloop_lport *lport;
215 };
216 
217 /* The port is already being removed, avoid double free */
218 #define PORT_DELETED	0
219 
220 struct fcloop_rport {
221 	struct nvme_fc_remote_port	*remoteport;
222 	struct nvmet_fc_target_port	*targetport;
223 	struct fcloop_nport		*nport;
224 	struct fcloop_lport		*lport;
225 	spinlock_t			lock;
226 	struct list_head		ls_list;
227 	struct work_struct		ls_work;
228 	unsigned long			flags;
229 };
230 
231 struct fcloop_tport {
232 	struct nvmet_fc_target_port	*targetport;
233 	struct nvme_fc_remote_port	*remoteport;
234 	struct fcloop_nport		*nport;
235 	struct fcloop_lport		*lport;
236 	spinlock_t			lock;
237 	struct list_head		ls_list;
238 	struct work_struct		ls_work;
239 	unsigned long			flags;
240 };
241 
242 struct fcloop_nport {
243 	struct fcloop_rport *rport;
244 	struct fcloop_tport *tport;
245 	struct fcloop_lport *lport;
246 	struct list_head nport_list;
247 	refcount_t ref;
248 	u64 node_name;
249 	u64 port_name;
250 	u32 port_role;
251 	u32 port_id;
252 };
253 
254 struct fcloop_lsreq {
255 	struct nvmefc_ls_req		*lsreq;
256 	struct nvmefc_ls_rsp		ls_rsp;
257 	int				status;
258 	struct list_head		ls_list; /* fcloop_rport/tport ls_list */
259 };
260 
261 struct fcloop_rscn {
262 	struct fcloop_tport		*tport;
263 	struct work_struct		work;
264 };
265 
266 enum {
267 	INI_IO_START		= 0,
268 	INI_IO_ACTIVE		= 1,
269 	INI_IO_ABORTED		= 2,
270 	INI_IO_COMPLETED	= 3,
271 };
272 
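/*
 * inistate transitions (see fcloop_fcp_recv_work, fcloop_fcp_abort and
 * fcloop_tgt_fcprqst_done_work): INI_IO_START -> INI_IO_ACTIVE once the
 * receive work runs, -> INI_IO_COMPLETED on the done path, and
 * INI_IO_START/INI_IO_ACTIVE -> INI_IO_ABORTED when an abort wins the
 * race against the receive work.
 */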
273 struct fcloop_fcpreq {
274 	struct fcloop_tport		*tport;
275 	struct nvmefc_fcp_req		*fcpreq;
276 	spinlock_t			reqlock;
277 	u16				status;
278 	u32				inistate;
279 	bool				active;
280 	bool				aborted;
281 	refcount_t			ref;
282 	struct work_struct		fcp_rcv_work;
283 	struct work_struct		abort_rcv_work;
284 	struct work_struct		tio_done_work;
285 	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
286 };
287 
288 struct fcloop_ini_fcpreq {
289 	struct nvmefc_fcp_req		*fcpreq;
290 	struct fcloop_fcpreq		*tfcp_req;
291 	spinlock_t			inilock;
292 };
293 
294 /* SLAB cache for fcloop_lsreq structures */
295 static struct kmem_cache *lsreq_cache;
296 
297 static inline struct fcloop_lsreq *
298 ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
299 {
300 	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
301 }
302 
303 static inline struct fcloop_fcpreq *
304 tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
305 {
306 	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
307 }
308 
309 
310 static int
311 fcloop_create_queue(struct nvme_fc_local_port *localport,
312 			unsigned int qidx, u16 qsize,
313 			void **handle)
314 {
315 	*handle = localport;
316 	return 0;
317 }
318 
319 static void
320 fcloop_delete_queue(struct nvme_fc_local_port *localport,
321 			unsigned int idx, void *handle)
322 {
323 }
324 
325 static void
326 fcloop_rport_lsrqst_work(struct work_struct *work)
327 {
328 	struct fcloop_rport *rport =
329 		container_of(work, struct fcloop_rport, ls_work);
330 	struct fcloop_lsreq *tls_req;
331 
332 	spin_lock(&rport->lock);
333 	for (;;) {
334 		tls_req = list_first_entry_or_null(&rport->ls_list,
335 				struct fcloop_lsreq, ls_list);
336 		if (!tls_req)
337 			break;
338 
339 		list_del(&tls_req->ls_list);
340 		spin_unlock(&rport->lock);
341 
342 		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
343 		/*
344 		 * The done() callee may free the memory backing lsreq;
345 		 * do not reference lsreq after this. tls_req is freed below.
346 		 */
347 		kmem_cache_free(lsreq_cache, tls_req);
348 
349 		spin_lock(&rport->lock);
350 	}
351 	spin_unlock(&rport->lock);
352 }
353 
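/*
 * Naming convention for the LS handlers below: "h2t" is host-to-target
 * (a host-issued request received by the target side), "t2h" is
 * target-to-host.
 */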
354 static int
355 fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
356 			struct nvme_fc_remote_port *remoteport,
357 			struct nvmefc_ls_req *lsreq)
358 {
359 	struct fcloop_rport *rport = remoteport->private;
360 	struct fcloop_lsreq *tls_req;
361 	int ret = 0;
362 
363 	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
364 	if (!tls_req)
365 		return -ENOMEM;
366 	tls_req->lsreq = lsreq;
367 	INIT_LIST_HEAD(&tls_req->ls_list);
368 
369 	if (!rport->targetport) {
370 		tls_req->status = -ECONNREFUSED;
371 		spin_lock(&rport->lock);
372 		list_add_tail(&tls_req->ls_list, &rport->ls_list);
373 		spin_unlock(&rport->lock);
374 		queue_work(nvmet_wq, &rport->ls_work);
375 		return ret;
376 	}
377 
378 	tls_req->status = 0;
379 	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
380 				  &tls_req->ls_rsp,
381 				  lsreq->rqstaddr, lsreq->rqstlen);
382 
383 	return ret;
384 }
385 
386 static int
387 fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
388 			struct nvmefc_ls_rsp *lsrsp)
389 {
390 	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
391 	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
392 	struct fcloop_tport *tport = targetport->private;
393 	struct nvme_fc_remote_port *remoteport = tport->remoteport;
394 	struct fcloop_rport *rport;
395 
396 	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
397 		((lsreq->rsplen < lsrsp->rsplen) ?
398 				lsreq->rsplen : lsrsp->rsplen));
399 
400 	lsrsp->done(lsrsp);
401 
402 	if (!remoteport) {
403 		kmem_cache_free(lsreq_cache, tls_req);
404 		return 0;
405 	}
406 
407 	rport = remoteport->private;
408 	spin_lock(&rport->lock);
409 	list_add_tail(&tls_req->ls_list, &rport->ls_list);
410 	spin_unlock(&rport->lock);
411 	queue_work(nvmet_wq, &rport->ls_work);
412 
413 	return 0;
414 }
415 
416 static void
417 fcloop_tport_lsrqst_work(struct work_struct *work)
418 {
419 	struct fcloop_tport *tport =
420 		container_of(work, struct fcloop_tport, ls_work);
421 	struct fcloop_lsreq *tls_req;
422 
423 	spin_lock(&tport->lock);
424 	for (;;) {
425 		tls_req = list_first_entry_or_null(&tport->ls_list,
426 				struct fcloop_lsreq, ls_list);
427 		if (!tls_req)
428 			break;
429 
430 		list_del(&tls_req->ls_list);
431 		spin_unlock(&tport->lock);
432 
433 		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
434 		/*
435 		 * The done() callee may free the memory backing lsreq;
436 		 * do not reference lsreq after this. tls_req is freed below.
437 		 */
438 		kmem_cache_free(lsreq_cache, tls_req);
439 
440 		spin_lock(&tport->lock);
441 	}
442 	spin_unlock(&tport->lock);
443 }
444 
445 static int
446 fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
447 			struct nvmefc_ls_req *lsreq)
448 {
449 	struct fcloop_tport *tport = targetport->private;
450 	struct fcloop_lsreq *tls_req;
451 	int ret = 0;
452 
453 	/*
454 	 * hosthandle should be the dst.rport value.
455 	 * It is ignored here as fcloop currently has a
456 	 * 1:1 tgtport:remoteport relationship.
457 	 */
458 
459 	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
460 	if (!tls_req)
461 		return -ENOMEM;
462 	tls_req->lsreq = lsreq;
463 	INIT_LIST_HEAD(&tls_req->ls_list);
464 
465 	if (!tport->remoteport) {
466 		tls_req->status = -ECONNREFUSED;
467 		spin_lock(&tport->lock);
468 		list_add_tail(&tls_req->ls_list, &tport->ls_list);
469 		spin_unlock(&tport->lock);
470 		queue_work(nvmet_wq, &tport->ls_work);
471 		return ret;
472 	}
473 
474 	tls_req->status = 0;
475 	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
476 				 lsreq->rqstaddr, lsreq->rqstlen);
477 
478 	if (ret)
479 		kmem_cache_free(lsreq_cache, tls_req);
480 
481 	return ret;
482 }
483 
484 static int
485 fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
486 			struct nvme_fc_remote_port *remoteport,
487 			struct nvmefc_ls_rsp *lsrsp)
488 {
489 	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
490 	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
491 	struct fcloop_rport *rport = remoteport->private;
492 	struct nvmet_fc_target_port *targetport = rport->targetport;
493 	struct fcloop_tport *tport;
494 
495 	if (!targetport) {
496 		/*
497 		 * The target port is gone. The target doesn't expect any
498 		 * response anymore and thus lsreq can't be accessed anymore.
499 		 *
500 		 * We end up here from delete association exchange:
501 		 * nvmet_fc_xmt_disconnect_assoc sends an async request.
502 		 *
503 		 * Return success because this is what LLDDs do; silently
504 		 * drop the response.
505 		 */
506 		lsrsp->done(lsrsp);
507 		kmem_cache_free(lsreq_cache, tls_req);
508 		return 0;
509 	}
510 
511 	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
512 		((lsreq->rsplen < lsrsp->rsplen) ?
513 				lsreq->rsplen : lsrsp->rsplen));
514 	lsrsp->done(lsrsp);
515 
516 	tport = targetport->private;
517 	spin_lock(&tport->lock);
518 	list_add_tail(&tls_req->ls_list, &tport->ls_list);
519 	spin_unlock(&tport->lock);
520 	queue_work(nvmet_wq, &tport->ls_work);
521 
522 	return 0;
523 }
524 
525 static void
526 fcloop_t2h_host_release(void *hosthandle)
527 {
528 	/* host handle ignored for now */
529 }
530 
531 static int
532 fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
533 {
534 	struct fcloop_rport *rport = hosthandle;
535 
536 	*wwnn = rport->lport->localport->node_name;
537 	*wwpn = rport->lport->localport->port_name;
538 	return 0;
539 }
540 
541 /*
542  * Simulate reception of an RSCN and convert it to an initiator transport
543  * call to rescan a remote port.
544  */
545 static void
546 fcloop_tgt_rscn_work(struct work_struct *work)
547 {
548 	struct fcloop_rscn *tgt_rscn =
549 		container_of(work, struct fcloop_rscn, work);
550 	struct fcloop_tport *tport = tgt_rscn->tport;
551 
552 	if (tport->remoteport)
553 		nvme_fc_rescan_remoteport(tport->remoteport);
554 	kfree(tgt_rscn);
555 }
556 
557 static void
558 fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
559 {
560 	struct fcloop_rscn *tgt_rscn;
561 
562 	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
563 	if (!tgt_rscn)
564 		return;
565 
566 	tgt_rscn->tport = tgtport->private;
567 	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
568 
569 	queue_work(nvmet_wq, &tgt_rscn->work);
570 }
571 
572 static void
573 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
574 {
575 	if (!refcount_dec_and_test(&tfcp_req->ref))
576 		return;
577 
578 	kfree(tfcp_req);
579 }
580 
581 static int
582 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
583 {
584 	return refcount_inc_not_zero(&tfcp_req->ref);
585 }
586 
587 static void
588 fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
589 			struct fcloop_fcpreq *tfcp_req, int status)
590 {
591 	struct fcloop_ini_fcpreq *inireq = NULL;
592 
593 	if (fcpreq) {
594 		inireq = fcpreq->private;
595 		spin_lock(&inireq->inilock);
596 		inireq->tfcp_req = NULL;
597 		spin_unlock(&inireq->inilock);
598 
599 		fcpreq->status = status;
600 		fcpreq->done(fcpreq);
601 	}
602 
603 	/* release original io reference on tgt struct */
604 	if (tfcp_req)
605 		fcloop_tfcp_req_put(tfcp_req);
606 }
607 
608 static bool drop_fabric_opcode;
609 #define DROP_OPCODE_MASK	0x00FF
610 /* a fabrics opcode will have a bit set above the 1st byte */
611 static int drop_opcode = -1;
612 static int drop_instance;
613 static int drop_amount;
614 static int drop_current_cnt;
615 
616 /*
617  * Routine to parse io and determine if the io is to be dropped.
618  * Returns:
619  *  0 if the io is not to be dropped
620  *  1 if the io is to be dropped
621  */
622 static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
623 {
624 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
625 	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
626 	struct nvme_command *sqe = &cmdiu->sqe;
627 
628 	if (drop_opcode == -1)
629 		return 0;
630 
631 	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
632 		"inst %d start %d amt %d\n",
633 		__func__, sqe->common.opcode, sqe->fabrics.fctype,
634 		drop_fabric_opcode ? "y" : "n",
635 		drop_opcode, drop_current_cnt, drop_instance, drop_amount);
636 
637 	if ((drop_fabric_opcode &&
638 	     (sqe->common.opcode != nvme_fabrics_command ||
639 	      sqe->fabrics.fctype != drop_opcode)) ||
640 	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
641 		return 0;
642 
643 	if (++drop_current_cnt >= drop_instance) {
644 		if (drop_current_cnt >= drop_instance + drop_amount)
645 			drop_opcode = -1;
646 		return 1;
647 	}
648 
649 	return 0;
650 }
651 
652 static void
653 fcloop_fcp_recv_work(struct work_struct *work)
654 {
655 	struct fcloop_fcpreq *tfcp_req =
656 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
657 	struct nvmefc_fcp_req *fcpreq;
658 	unsigned long flags;
659 	int ret = 0;
660 	bool aborted = false;
661 
662 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
663 	fcpreq = tfcp_req->fcpreq;
664 	switch (tfcp_req->inistate) {
665 	case INI_IO_START:
666 		tfcp_req->inistate = INI_IO_ACTIVE;
667 		break;
668 	case INI_IO_ABORTED:
669 		aborted = true;
670 		break;
671 	default:
672 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
673 		WARN_ON(1);
674 		return;
675 	}
676 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
677 
678 	if (unlikely(aborted)) {
679 		/* the abort handler will call fcloop_call_host_done */
680 		return;
681 	}
682 
683 	if (unlikely(check_for_drop(tfcp_req))) {
684 		pr_info("%s: dropped command ********\n", __func__);
685 		return;
686 	}
687 
688 	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
689 				   &tfcp_req->tgt_fcp_req,
690 				   fcpreq->cmdaddr, fcpreq->cmdlen);
691 	if (ret)
692 		fcloop_call_host_done(fcpreq, tfcp_req, ret);
693 }
694 
695 static void
696 fcloop_fcp_abort_recv_work(struct work_struct *work)
697 {
698 	struct fcloop_fcpreq *tfcp_req =
699 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
700 	struct nvmefc_fcp_req *fcpreq;
701 	bool completed = false;
702 	unsigned long flags;
703 
704 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
705 	switch (tfcp_req->inistate) {
706 	case INI_IO_ABORTED:
707 		fcpreq = tfcp_req->fcpreq;
708 		tfcp_req->fcpreq = NULL;
709 		break;
710 	case INI_IO_COMPLETED:
711 		completed = true;
712 		break;
713 	default:
714 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
715 		fcloop_tfcp_req_put(tfcp_req);
716 		WARN_ON(1);
717 		return;
718 	}
719 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
720 
721 	if (unlikely(completed)) {
722 		/* remove reference taken in original abort downcall */
723 		fcloop_tfcp_req_put(tfcp_req);
724 		return;
725 	}
726 
727 	if (tfcp_req->tport->targetport)
728 		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
729 					&tfcp_req->tgt_fcp_req);
730 
731 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
732 	/* call_host_done releases reference for abort downcall */
733 }
734 
735 /*
736  * FCP IO operation done by target completion.
737  * call back up initiator "done" flows.
738  */
739 static void
740 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
741 {
742 	struct fcloop_fcpreq *tfcp_req =
743 		container_of(work, struct fcloop_fcpreq, tio_done_work);
744 	struct nvmefc_fcp_req *fcpreq;
745 	unsigned long flags;
746 
747 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
748 	fcpreq = tfcp_req->fcpreq;
749 	tfcp_req->inistate = INI_IO_COMPLETED;
750 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
751 
752 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
753 }
754 
755 
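/*
 * Start a host-to-target FCP operation. The tfcp_req is created with a
 * single reference, dropped by fcloop_call_host_done() when the io
 * completes; the abort path takes and drops one extra reference around
 * the abort work.
 */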
756 static int
757 fcloop_fcp_req(struct nvme_fc_local_port *localport,
758 			struct nvme_fc_remote_port *remoteport,
759 			void *hw_queue_handle,
760 			struct nvmefc_fcp_req *fcpreq)
761 {
762 	struct fcloop_rport *rport = remoteport->private;
763 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
764 	struct fcloop_fcpreq *tfcp_req;
765 
766 	if (!rport->targetport)
767 		return -ECONNREFUSED;
768 
769 	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
770 	if (!tfcp_req)
771 		return -ENOMEM;
772 
773 	inireq->fcpreq = fcpreq;
774 	inireq->tfcp_req = tfcp_req;
775 	spin_lock_init(&inireq->inilock);
776 
777 	tfcp_req->fcpreq = fcpreq;
778 	tfcp_req->tport = rport->targetport->private;
779 	tfcp_req->inistate = INI_IO_START;
780 	spin_lock_init(&tfcp_req->reqlock);
781 	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
782 	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
783 	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
784 	refcount_set(&tfcp_req->ref, 1);
785 
786 	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
787 
788 	return 0;
789 }
790 
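/*
 * Copy payload between the target-side scatterlist (data_sg) and the
 * initiator-side scatterlist (io_sg), starting at the given byte offset
 * into the initiator buffer: NVMET_FCOP_WRITEDATA copies initiator to
 * target, the read ops copy target to initiator.
 */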
791 static void
792 fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
793 			struct scatterlist *io_sg, u32 offset, u32 length)
794 {
795 	void *data_p, *io_p;
796 	u32 data_len, io_len, tlen;
797 
798 	io_p = sg_virt(io_sg);
799 	io_len = io_sg->length;
800 
801 	for ( ; offset; ) {
802 		tlen = min_t(u32, offset, io_len);
803 		offset -= tlen;
804 		io_len -= tlen;
805 		if (!io_len) {
806 			io_sg = sg_next(io_sg);
807 			io_p = sg_virt(io_sg);
808 			io_len = io_sg->length;
809 		} else
810 			io_p += tlen;
811 	}
812 
813 	data_p = sg_virt(data_sg);
814 	data_len = data_sg->length;
815 
816 	for ( ; length; ) {
817 		tlen = min_t(u32, io_len, data_len);
818 		tlen = min_t(u32, tlen, length);
819 
820 		if (op == NVMET_FCOP_WRITEDATA)
821 			memcpy(data_p, io_p, tlen);
822 		else
823 			memcpy(io_p, data_p, tlen);
824 
825 		length -= tlen;
826 
827 		io_len -= tlen;
828 		if ((!io_len) && (length)) {
829 			io_sg = sg_next(io_sg);
830 			io_p = sg_virt(io_sg);
831 			io_len = io_sg->length;
832 		} else
833 			io_p += tlen;
834 
835 		data_len -= tlen;
836 		if ((!data_len) && (length)) {
837 			data_sg = sg_next(data_sg);
838 			data_p = sg_virt(data_sg);
839 			data_len = data_sg->length;
840 		} else
841 			data_p += tlen;
842 	}
843 }
844 
845 static int
846 fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
847 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
848 {
849 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
850 	struct nvmefc_fcp_req *fcpreq;
851 	u32 rsplen = 0, xfrlen = 0;
852 	int fcp_err = 0, active, aborted;
853 	u8 op = tgt_fcpreq->op;
854 	unsigned long flags;
855 
856 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
857 	fcpreq = tfcp_req->fcpreq;
858 	active = tfcp_req->active;
859 	aborted = tfcp_req->aborted;
860 	tfcp_req->active = true;
861 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
862 
863 	if (unlikely(active))
864 		/* illegal - call while i/o active */
865 		return -EALREADY;
866 
867 	if (unlikely(aborted)) {
868 		/* target transport has aborted i/o prior */
869 		spin_lock_irqsave(&tfcp_req->reqlock, flags);
870 		tfcp_req->active = false;
871 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
872 		tgt_fcpreq->transferred_length = 0;
873 		tgt_fcpreq->fcp_error = -ECANCELED;
874 		tgt_fcpreq->done(tgt_fcpreq);
875 		return 0;
876 	}
877 
878 	/*
879 	 * if fcpreq is NULL, the I/O has been aborted (from
880 	 * initiator side). For the target side, act as if all is well
881 	 * but don't actually move data.
882 	 */
883 
884 	switch (op) {
885 	case NVMET_FCOP_WRITEDATA:
886 		xfrlen = tgt_fcpreq->transfer_length;
887 		if (fcpreq) {
888 			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
889 					fcpreq->first_sgl, tgt_fcpreq->offset,
890 					xfrlen);
891 			fcpreq->transferred_length += xfrlen;
892 		}
893 		break;
894 
895 	case NVMET_FCOP_READDATA:
896 	case NVMET_FCOP_READDATA_RSP:
897 		xfrlen = tgt_fcpreq->transfer_length;
898 		if (fcpreq) {
899 			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
900 					fcpreq->first_sgl, tgt_fcpreq->offset,
901 					xfrlen);
902 			fcpreq->transferred_length += xfrlen;
903 		}
904 		if (op == NVMET_FCOP_READDATA)
905 			break;
906 
907 		/* Fall-Thru to RSP handling */
908 		fallthrough;
909 
910 	case NVMET_FCOP_RSP:
911 		if (fcpreq) {
912 			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
913 					fcpreq->rsplen : tgt_fcpreq->rsplen);
914 			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
915 			if (rsplen < tgt_fcpreq->rsplen)
916 				fcp_err = -E2BIG;
917 			fcpreq->rcv_rsplen = rsplen;
918 			fcpreq->status = 0;
919 		}
920 		tfcp_req->status = 0;
921 		break;
922 
923 	default:
924 		fcp_err = -EINVAL;
925 		break;
926 	}
927 
928 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
929 	tfcp_req->active = false;
930 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
931 
932 	tgt_fcpreq->transferred_length = xfrlen;
933 	tgt_fcpreq->fcp_error = fcp_err;
934 	tgt_fcpreq->done(tgt_fcpreq);
935 
936 	return 0;
937 }
938 
939 static void
940 fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
941 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
942 {
943 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
944 	unsigned long flags;
945 
946 	/*
947 	 * Mark aborted only in case there were two threads in the transport
948 	 * (one doing io, the other doing the abort); this only kills ops
949 	 * posted after the abort request.
950 	 */
951 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
952 	tfcp_req->aborted = true;
953 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
954 
955 	tfcp_req->status = NVME_SC_INTERNAL;
956 
957 	/*
958 	 * nothing more to do. If io wasn't active, the transport should
959 	 * immediately call the req_release. If it was active, the op
960 	 * will complete, and the lldd should call req_release.
961 	 */
962 }
963 
964 static void
965 fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
966 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
967 {
968 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
969 
970 	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
971 }
972 
973 static void
974 fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
975 			struct nvme_fc_remote_port *remoteport,
976 				struct nvmefc_ls_req *lsreq)
977 {
978 }
979 
980 static void
981 fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
982 			void *hosthandle, struct nvmefc_ls_req *lsreq)
983 {
984 }
985 
986 static void
987 fcloop_fcp_abort(struct nvme_fc_local_port *localport,
988 			struct nvme_fc_remote_port *remoteport,
989 			void *hw_queue_handle,
990 			struct nvmefc_fcp_req *fcpreq)
991 {
992 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
993 	struct fcloop_fcpreq *tfcp_req;
994 	bool abortio = true;
995 	unsigned long flags;
996 
997 	spin_lock(&inireq->inilock);
998 	tfcp_req = inireq->tfcp_req;
999 	if (tfcp_req) {
1000 		if (!fcloop_tfcp_req_get(tfcp_req))
1001 			tfcp_req = NULL;
1002 	}
1003 	spin_unlock(&inireq->inilock);
1004 
1005 	if (!tfcp_req) {
1006 		/* abort has already been called */
1007 		goto out_host_done;
1008 	}
1009 
1010 	/* break initiator/target relationship for io */
1011 	spin_lock_irqsave(&tfcp_req->reqlock, flags);
1012 	switch (tfcp_req->inistate) {
1013 	case INI_IO_START:
1014 	case INI_IO_ACTIVE:
1015 		tfcp_req->inistate = INI_IO_ABORTED;
1016 		break;
1017 	case INI_IO_COMPLETED:
1018 		abortio = false;
1019 		break;
1020 	default:
1021 		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
1022 		WARN_ON(1);
1023 		goto out_host_done;
1024 	}
1025 	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
1026 
1027 	if (abortio)
1028 		/* leave the reference while the work item is scheduled */
1029 		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
1030 	else  {
1031 		/*
1032 		 * as the io has already had the done callback made,
1033 		 * nothing more to do. So release the reference taken above
1034 		 */
1035 		fcloop_tfcp_req_put(tfcp_req);
1036 	}
1037 
1038 	return;
1039 
1040 out_host_done:
1041 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
1042 }
1043 
1044 static void
1045 fcloop_lport_put(struct fcloop_lport *lport)
1046 {
1047 	unsigned long flags;
1048 
1049 	if (!refcount_dec_and_test(&lport->ref))
1050 		return;
1051 
1052 	spin_lock_irqsave(&fcloop_lock, flags);
1053 	list_del(&lport->lport_list);
1054 	spin_unlock_irqrestore(&fcloop_lock, flags);
1055 
1056 	kfree(lport);
1057 }
1058 
1059 static int
1060 fcloop_lport_get(struct fcloop_lport *lport)
1061 {
1062 	return refcount_inc_not_zero(&lport->ref);
1063 }
1064 
1065 static void
1066 fcloop_nport_put(struct fcloop_nport *nport)
1067 {
1068 	unsigned long flags;
1069 
1070 	if (!refcount_dec_and_test(&nport->ref))
1071 		return;
1072 
1073 	spin_lock_irqsave(&fcloop_lock, flags);
1074 	list_del(&nport->nport_list);
1075 	spin_unlock_irqrestore(&fcloop_lock, flags);
1076 
1077 	if (nport->lport)
1078 		fcloop_lport_put(nport->lport);
1079 
1080 	kfree(nport);
1081 }
1082 
1083 static int
1084 fcloop_nport_get(struct fcloop_nport *nport)
1085 {
1086 	return refcount_inc_not_zero(&nport->ref);
1087 }
1088 
1089 static void
1090 fcloop_localport_delete(struct nvme_fc_local_port *localport)
1091 {
1092 	struct fcloop_lport_priv *lport_priv = localport->private;
1093 	struct fcloop_lport *lport = lport_priv->lport;
1094 
1095 	fcloop_lport_put(lport);
1096 }
1097 
1098 static void
1099 fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
1100 {
1101 	struct fcloop_rport *rport = remoteport->private;
1102 	bool put_port = false;
1103 	unsigned long flags;
1104 
1105 	flush_work(&rport->ls_work);
1106 
1107 	spin_lock_irqsave(&fcloop_lock, flags);
1108 	if (!test_and_set_bit(PORT_DELETED, &rport->flags))
1109 		put_port = true;
1110 	rport->nport->rport = NULL;
1111 	spin_unlock_irqrestore(&fcloop_lock, flags);
1112 
1113 	if (put_port) {
1114 		WARN_ON(!list_empty(&rport->ls_list));
1115 		fcloop_nport_put(rport->nport);
1116 	}
1117 }
1118 
1119 static void
1120 fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
1121 {
1122 	struct fcloop_tport *tport = targetport->private;
1123 	bool put_port = false;
1124 	unsigned long flags;
1125 
1126 	flush_work(&tport->ls_work);
1127 
1128 	spin_lock_irqsave(&fcloop_lock, flags);
1129 	if (!test_and_set_bit(PORT_DELETED, &tport->flags))
1130 		put_port = true;
1131 	tport->nport->tport = NULL;
1132 	spin_unlock_irqrestore(&fcloop_lock, flags);
1133 
1134 	if (put_port) {
1135 		WARN_ON(!list_empty(&tport->ls_list));
1136 		fcloop_nport_put(tport->nport);
1137 	}
1138 }
1139 
1140 #define	FCLOOP_HW_QUEUES		4
1141 #define	FCLOOP_SGL_SEGS			256
1142 #define FCLOOP_DMABOUND_4G		0xFFFFFFFF
1143 
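/*
 * Both the host-side (nvme_fc_port_template) and target-side
 * (nvmet_fc_target_template) operations are implemented in this module,
 * so the two transports loop back into each other.
 */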
1144 static struct nvme_fc_port_template fctemplate = {
1145 	.localport_delete	= fcloop_localport_delete,
1146 	.remoteport_delete	= fcloop_remoteport_delete,
1147 	.create_queue		= fcloop_create_queue,
1148 	.delete_queue		= fcloop_delete_queue,
1149 	.ls_req			= fcloop_h2t_ls_req,
1150 	.fcp_io			= fcloop_fcp_req,
1151 	.ls_abort		= fcloop_h2t_ls_abort,
1152 	.fcp_abort		= fcloop_fcp_abort,
1153 	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
1154 	.max_hw_queues		= FCLOOP_HW_QUEUES,
1155 	.max_sgl_segments	= FCLOOP_SGL_SEGS,
1156 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
1157 	.dma_boundary		= FCLOOP_DMABOUND_4G,
1158 	/* sizes of additional private data for data structures */
1159 	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
1160 	.remote_priv_sz		= sizeof(struct fcloop_rport),
1161 	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
1162 };
1163 
1164 static struct nvmet_fc_target_template tgttemplate = {
1165 	.targetport_delete	= fcloop_targetport_delete,
1166 	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
1167 	.fcp_op			= fcloop_fcp_op,
1168 	.fcp_abort		= fcloop_tgt_fcp_abort,
1169 	.fcp_req_release	= fcloop_fcp_req_release,
1170 	.discovery_event	= fcloop_tgt_discovery_evt,
1171 	.ls_req			= fcloop_t2h_ls_req,
1172 	.ls_abort		= fcloop_t2h_ls_abort,
1173 	.host_release		= fcloop_t2h_host_release,
1174 	.host_traddr		= fcloop_t2h_host_traddr,
1175 	.max_hw_queues		= FCLOOP_HW_QUEUES,
1176 	.max_sgl_segments	= FCLOOP_SGL_SEGS,
1177 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
1178 	.dma_boundary		= FCLOOP_DMABOUND_4G,
1179 	/* optional features */
1180 	.target_features	= 0,
1181 	/* sizes of additional private data for data structures */
1182 	.target_priv_sz		= sizeof(struct fcloop_tport),
1183 };
1184 
1185 static ssize_t
1186 fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
1187 		const char *buf, size_t count)
1188 {
1189 	struct nvme_fc_port_info pinfo;
1190 	struct fcloop_ctrl_options *opts;
1191 	struct nvme_fc_local_port *localport;
1192 	struct fcloop_lport *lport;
1193 	struct fcloop_lport_priv *lport_priv;
1194 	unsigned long flags;
1195 	int ret = -ENOMEM;
1196 
1197 	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
1198 	if (!lport)
1199 		return -ENOMEM;
1200 
1201 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1202 	if (!opts)
1203 		goto out_free_lport;
1204 
1205 	ret = fcloop_parse_options(opts, buf);
1206 	if (ret)
1207 		goto out_free_opts;
1208 
1209 	/* all required options present? */
1210 	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
1211 		ret = -EINVAL;
1212 		goto out_free_opts;
1213 	}
1214 
1215 	memset(&pinfo, 0, sizeof(pinfo));
1216 	pinfo.node_name = opts->wwnn;
1217 	pinfo.port_name = opts->wwpn;
1218 	pinfo.port_role = opts->roles;
1219 	pinfo.port_id = opts->fcaddr;
1220 
1221 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
1222 	if (!ret) {
1223 		/* success */
1224 		lport_priv = localport->private;
1225 		lport_priv->lport = lport;
1226 
1227 		lport->localport = localport;
1228 		INIT_LIST_HEAD(&lport->lport_list);
1229 		refcount_set(&lport->ref, 1);
1230 
1231 		spin_lock_irqsave(&fcloop_lock, flags);
1232 		list_add_tail(&lport->lport_list, &fcloop_lports);
1233 		spin_unlock_irqrestore(&fcloop_lock, flags);
1234 	}
1235 
1236 out_free_opts:
1237 	kfree(opts);
1238 out_free_lport:
1239 	/* free only if we're going to fail */
1240 	if (ret)
1241 		kfree(lport);
1242 
1243 	return ret ? ret : count;
1244 }
1245 
1246 static int
1247 __localport_unreg(struct fcloop_lport *lport)
1248 {
1249 	return nvme_fc_unregister_localport(lport->localport);
1250 }
1251 
1252 static struct fcloop_nport *
1253 __fcloop_nport_lookup(u64 node_name, u64 port_name)
1254 {
1255 	struct fcloop_nport *nport;
1256 
1257 	list_for_each_entry(nport, &fcloop_nports, nport_list) {
1258 		if (nport->node_name != node_name ||
1259 		    nport->port_name != port_name)
1260 			continue;
1261 
1262 		if (fcloop_nport_get(nport))
1263 			return nport;
1264 
1265 		break;
1266 	}
1267 
1268 	return NULL;
1269 }
1270 
1271 static struct fcloop_nport *
1272 fcloop_nport_lookup(u64 node_name, u64 port_name)
1273 {
1274 	struct fcloop_nport *nport;
1275 	unsigned long flags;
1276 
1277 	spin_lock_irqsave(&fcloop_lock, flags);
1278 	nport = __fcloop_nport_lookup(node_name, port_name);
1279 	spin_unlock_irqrestore(&fcloop_lock, flags);
1280 
1281 	return nport;
1282 }
1283 
1284 static struct fcloop_lport *
1285 __fcloop_lport_lookup(u64 node_name, u64 port_name)
1286 {
1287 	struct fcloop_lport *lport;
1288 
1289 	list_for_each_entry(lport, &fcloop_lports, lport_list) {
1290 		if (lport->localport->node_name != node_name ||
1291 		    lport->localport->port_name != port_name)
1292 			continue;
1293 
1294 		if (fcloop_lport_get(lport))
1295 			return lport;
1296 
1297 		break;
1298 	}
1299 
1300 	return NULL;
1301 }
1302 
1303 static struct fcloop_lport *
1304 fcloop_lport_lookup(u64 node_name, u64 port_name)
1305 {
1306 	struct fcloop_lport *lport;
1307 	unsigned long flags;
1308 
1309 	spin_lock_irqsave(&fcloop_lock, flags);
1310 	lport = __fcloop_lport_lookup(node_name, port_name);
1311 	spin_unlock_irqrestore(&fcloop_lock, flags);
1312 
1313 	return lport;
1314 }
1315 
1316 static ssize_t
1317 fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
1318 		const char *buf, size_t count)
1319 {
1320 	struct fcloop_lport *lport;
1321 	u64 nodename, portname;
1322 	int ret;
1323 
1324 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1325 	if (ret)
1326 		return ret;
1327 
1328 	lport = fcloop_lport_lookup(nodename, portname);
1329 	if (!lport)
1330 		return -ENOENT;
1331 
1332 	ret = __localport_unreg(lport);
1333 	fcloop_lport_put(lport);
1334 
1335 	return ret ? ret : count;
1336 }
1337 
1338 static struct fcloop_nport *
1339 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
1340 {
1341 	struct fcloop_nport *newnport, *nport;
1342 	struct fcloop_lport *lport;
1343 	struct fcloop_ctrl_options *opts;
1344 	unsigned long flags;
1345 	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
1346 	int ret;
1347 
1348 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1349 	if (!opts)
1350 		return NULL;
1351 
1352 	ret = fcloop_parse_options(opts, buf);
1353 	if (ret)
1354 		goto out_free_opts;
1355 
1356 	/* all required options present? */
1357 	if ((opts->mask & opts_mask) != opts_mask)
1358 		goto out_free_opts;
1359 
1360 	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
1361 	if (!newnport)
1362 		goto out_free_opts;
1363 
1364 	INIT_LIST_HEAD(&newnport->nport_list);
1365 	newnport->node_name = opts->wwnn;
1366 	newnport->port_name = opts->wwpn;
1367 	if (opts->mask & NVMF_OPT_ROLES)
1368 		newnport->port_role = opts->roles;
1369 	if (opts->mask & NVMF_OPT_FCADDR)
1370 		newnport->port_id = opts->fcaddr;
1371 	refcount_set(&newnport->ref, 1);
1372 
1373 	spin_lock_irqsave(&fcloop_lock, flags);
1374 	lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
1375 	if (lport) {
1376 		/* invalid configuration */
1377 		fcloop_lport_put(lport);
1378 		goto out_free_newnport;
1379 	}
1380 
1381 	if (remoteport) {
1382 		lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
1383 		if (!lport) {
1384 			/* invalid configuration */
1385 			goto out_free_newnport;
1386 		}
1387 	}
1388 
1389 	nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
1390 	if (nport) {
1391 		if ((remoteport && nport->rport) ||
1392 		    (!remoteport && nport->tport)) {
1393 			/* invalid configuration */
1394 			goto out_put_nport;
1395 		}
1396 
1397 		/* found existing nport, discard the new nport */
1398 		kfree(newnport);
1399 	} else {
1400 		list_add_tail(&newnport->nport_list, &fcloop_nports);
1401 		nport = newnport;
1402 	}
1403 
1404 	if (opts->mask & NVMF_OPT_ROLES)
1405 		nport->port_role = opts->roles;
1406 	if (opts->mask & NVMF_OPT_FCADDR)
1407 		nport->port_id = opts->fcaddr;
1408 	if (lport) {
1409 		if (!nport->lport)
1410 			nport->lport = lport;
1411 		else
1412 			fcloop_lport_put(lport);
1413 	}
1414 	spin_unlock_irqrestore(&fcloop_lock, flags);
1415 
1416 	kfree(opts);
1417 	return nport;
1418 
1419 out_put_nport:
1420 	if (lport)
1421 		fcloop_lport_put(lport);
1422 	fcloop_nport_put(nport);
1423 out_free_newnport:
1424 	spin_unlock_irqrestore(&fcloop_lock, flags);
1425 	kfree(newnport);
1426 out_free_opts:
1427 	kfree(opts);
1428 	return NULL;
1429 }
1430 
1431 static ssize_t
1432 fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
1433 		const char *buf, size_t count)
1434 {
1435 	struct nvme_fc_remote_port *remoteport;
1436 	struct fcloop_nport *nport;
1437 	struct fcloop_rport *rport;
1438 	struct nvme_fc_port_info pinfo;
1439 	int ret;
1440 
1441 	nport = fcloop_alloc_nport(buf, count, true);
1442 	if (!nport)
1443 		return -EIO;
1444 
1445 	memset(&pinfo, 0, sizeof(pinfo));
1446 	pinfo.node_name = nport->node_name;
1447 	pinfo.port_name = nport->port_name;
1448 	pinfo.port_role = nport->port_role;
1449 	pinfo.port_id = nport->port_id;
1450 
1451 	ret = nvme_fc_register_remoteport(nport->lport->localport,
1452 						&pinfo, &remoteport);
1453 	if (ret || !remoteport) {
1454 		fcloop_nport_put(nport);
1455 		return ret;
1456 	}
1457 
1458 	/* success */
1459 	rport = remoteport->private;
1460 	rport->remoteport = remoteport;
1461 	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
1462 	if (nport->tport) {
1463 		nport->tport->remoteport = remoteport;
1464 		nport->tport->lport = nport->lport;
1465 	}
1466 	rport->nport = nport;
1467 	rport->lport = nport->lport;
1468 	nport->rport = rport;
1469 	rport->flags = 0;
1470 	spin_lock_init(&rport->lock);
1471 	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
1472 	INIT_LIST_HEAD(&rport->ls_list);
1473 
1474 	return count;
1475 }
1476 
1477 
1478 static struct fcloop_rport *
1479 __unlink_remote_port(struct fcloop_nport *nport)
1480 {
1481 	struct fcloop_rport *rport = nport->rport;
1482 
1483 	lockdep_assert_held(&fcloop_lock);
1484 
1485 	if (rport && nport->tport)
1486 		nport->tport->remoteport = NULL;
1487 	nport->rport = NULL;
1488 
1489 	return rport;
1490 }
1491 
1492 static int
1493 __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1494 {
1495 	return nvme_fc_unregister_remoteport(rport->remoteport);
1496 }
1497 
1498 static ssize_t
1499 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1500 		const char *buf, size_t count)
1501 {
1502 	struct fcloop_nport *nport;
1503 	struct fcloop_rport *rport;
1504 	u64 nodename, portname;
1505 	unsigned long flags;
1506 	int ret;
1507 
1508 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1509 	if (ret)
1510 		return ret;
1511 
1512 	nport = fcloop_nport_lookup(nodename, portname);
1513 	if (!nport)
1514 		return -ENOENT;
1515 
1516 	spin_lock_irqsave(&fcloop_lock, flags);
1517 	rport = __unlink_remote_port(nport);
1518 	spin_unlock_irqrestore(&fcloop_lock, flags);
1519 
1520 	if (!rport) {
1521 		ret = -ENOENT;
1522 		goto out_nport_put;
1523 	}
1524 
1525 	ret = __remoteport_unreg(nport, rport);
1526 
1527 out_nport_put:
1528 	fcloop_nport_put(nport);
1529 
1530 	return ret ? ret : count;
1531 }
1532 
1533 static ssize_t
1534 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1535 		const char *buf, size_t count)
1536 {
1537 	struct nvmet_fc_target_port *targetport;
1538 	struct fcloop_nport *nport;
1539 	struct fcloop_tport *tport;
1540 	struct nvmet_fc_port_info tinfo;
1541 	int ret;
1542 
1543 	nport = fcloop_alloc_nport(buf, count, false);
1544 	if (!nport)
1545 		return -EIO;
1546 
1547 	tinfo.node_name = nport->node_name;
1548 	tinfo.port_name = nport->port_name;
1549 	tinfo.port_id = nport->port_id;
1550 
1551 	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1552 						&targetport);
1553 	if (ret) {
1554 		fcloop_nport_put(nport);
1555 		return ret;
1556 	}
1557 
1558 	/* success */
1559 	tport = targetport->private;
1560 	tport->targetport = targetport;
1561 	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
1562 	if (nport->rport)
1563 		nport->rport->targetport = targetport;
1564 	tport->nport = nport;
1565 	tport->lport = nport->lport;
1566 	nport->tport = tport;
1567 	tport->flags = 0;
1568 	spin_lock_init(&tport->lock);
1569 	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
1570 	INIT_LIST_HEAD(&tport->ls_list);
1571 
1572 	return count;
1573 }
1574 
1575 
1576 static struct fcloop_tport *
1577 __unlink_target_port(struct fcloop_nport *nport)
1578 {
1579 	struct fcloop_tport *tport = nport->tport;
1580 
1581 	lockdep_assert_held(&fcloop_lock);
1582 
1583 	if (tport && nport->rport)
1584 		nport->rport->targetport = NULL;
1585 	nport->tport = NULL;
1586 
1587 	return tport;
1588 }
1589 
1590 static int
1591 __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1592 {
1593 	return nvmet_fc_unregister_targetport(tport->targetport);
1594 }
1595 
1596 static ssize_t
1597 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1598 		const char *buf, size_t count)
1599 {
1600 	struct fcloop_nport *nport;
1601 	struct fcloop_tport *tport;
1602 	u64 nodename, portname;
1603 	unsigned long flags;
1604 	int ret;
1605 
1606 	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1607 	if (ret)
1608 		return ret;
1609 
1610 	nport = fcloop_nport_lookup(nodename, portname);
1611 	if (!nport)
1612 		return -ENOENT;
1613 
1614 	spin_lock_irqsave(&fcloop_lock, flags);
1615 	tport = __unlink_target_port(nport);
1616 	spin_unlock_irqrestore(&fcloop_lock, flags);
1617 
1618 	if (!tport) {
1619 		ret = -ENOENT;
1620 		goto out_nport_put;
1621 	}
1622 
1623 	ret = __targetport_unreg(nport, tport);
1624 
1625 out_nport_put:
1626 	fcloop_nport_put(nport);
1627 
1628 	return ret ? ret : count;
1629 }
1630 
1631 static ssize_t
1632 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
1633 		const char *buf, size_t count)
1634 {
1635 	unsigned int opcode;
1636 	int starting, amount;
1637 
1638 	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
1639 		return -EBADRQC;
1640 
1641 	drop_current_cnt = 0;
1642 	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
1643 	drop_opcode = (opcode & DROP_OPCODE_MASK);
1644 	drop_instance = starting;
1645 	/* The check_for_drop() routine uses instance + count to know when
1646 	 * to end. Thus, if dropping 1 instance, the count should be 0,
1647 	 * so subtract 1 from the count.
1648 	 */
1649 	drop_amount = amount - 1;
1650 
1651 	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
1652 		"instances\n",
1653 		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
1654 		drop_opcode, drop_amount);
1655 
1656 	return count;
1657 }
1658 
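/*
 * Illustrative use (assumed values): drop two consecutive matches of NVMe
 * opcode 0x02, starting with the 2nd matching command seen:
 *
 *   echo "0x02:2:2" > /sys/class/fcloop/ctl/set_cmd_drop
 *
 * Any bit set above the low byte (e.g. "0x101:1:1") selects a fabrics
 * command instead, with the low byte matched against sqe->fabrics.fctype.
 */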
1659 
1660 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1661 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1662 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1663 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1664 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1665 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1666 static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
1667 
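/*
 * A minimal loopback setup through these attributes (illustrative WWNs):
 * the remote port's wwnn/wwpn name the nport it shares with the target
 * port, while lpwwnn/lpwwpn name the local port to attach to:
 *
 *   echo "wwnn=0x20000090fadd3a58,wwpn=0x10000090fadd3a58" > \
 *       /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x20000090fadd3a59,wwpn=0x10000090fadd3a59" > \
 *       /sys/class/fcloop/ctl/add_target_port
 *   echo "wwnn=0x20000090fadd3a59,wwpn=0x10000090fadd3a59,lpwwnn=0x20000090fadd3a58,lpwwpn=0x10000090fadd3a58" > \
 *       /sys/class/fcloop/ctl/add_remote_port
 */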
1668 static struct attribute *fcloop_dev_attrs[] = {
1669 	&dev_attr_add_local_port.attr,
1670 	&dev_attr_del_local_port.attr,
1671 	&dev_attr_add_remote_port.attr,
1672 	&dev_attr_del_remote_port.attr,
1673 	&dev_attr_add_target_port.attr,
1674 	&dev_attr_del_target_port.attr,
1675 	&dev_attr_set_cmd_drop.attr,
1676 	NULL
1677 };
1678 
1679 static const struct attribute_group fcloop_dev_attrs_group = {
1680 	.attrs		= fcloop_dev_attrs,
1681 };
1682 
1683 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1684 	&fcloop_dev_attrs_group,
1685 	NULL,
1686 };
1687 
1688 static const struct class fcloop_class = {
1689 	.name = "fcloop",
1690 };
1691 static struct device *fcloop_device;
1692 
1693 static int __init fcloop_init(void)
1694 {
1695 	int ret;
1696 
1697 	lsreq_cache = kmem_cache_create("lsreq_cache",
1698 				sizeof(struct fcloop_lsreq), 0,
1699 				0, NULL);
1700 	if (!lsreq_cache)
1701 		return -ENOMEM;
1702 
1703 	ret = class_register(&fcloop_class);
1704 	if (ret) {
1705 		pr_err("couldn't register class fcloop\n");
1706 		goto out_destroy_cache;
1707 	}
1708 
1709 	fcloop_device = device_create_with_groups(
1710 				&fcloop_class, NULL, MKDEV(0, 0), NULL,
1711 				fcloop_dev_attr_groups, "ctl");
1712 	if (IS_ERR(fcloop_device)) {
1713 		pr_err("couldn't create ctl device!\n");
1714 		ret = PTR_ERR(fcloop_device);
1715 		goto out_destroy_class;
1716 	}
1717 
1718 	get_device(fcloop_device);
1719 
1720 	return 0;
1721 
1722 out_destroy_class:
1723 	class_unregister(&fcloop_class);
1724 out_destroy_cache:
1725 	kmem_cache_destroy(lsreq_cache);
1726 	return ret;
1727 }
1728 
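/*
 * Module teardown: unregister every nport's target and remote port, then
 * every local port; dropping the list references lets the final puts free
 * the structures.
 */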
1729 static void __exit fcloop_exit(void)
1730 {
1731 	struct fcloop_lport *lport;
1732 	struct fcloop_nport *nport;
1733 	struct fcloop_tport *tport;
1734 	struct fcloop_rport *rport;
1735 	unsigned long flags;
1736 	int ret;
1737 
1738 	spin_lock_irqsave(&fcloop_lock, flags);
1739 
1740 	for (;;) {
1741 		nport = list_first_entry_or_null(&fcloop_nports,
1742 						typeof(*nport), nport_list);
1743 		if (!nport || !fcloop_nport_get(nport))
1744 			break;
1745 
1746 		tport = __unlink_target_port(nport);
1747 		rport = __unlink_remote_port(nport);
1748 
1749 		spin_unlock_irqrestore(&fcloop_lock, flags);
1750 
1751 		if (tport) {
1752 			ret = __targetport_unreg(nport, tport);
1753 			if (ret)
1754 				pr_warn("%s: Failed deleting target port\n",
1755 					__func__);
1756 		}
1757 
1758 		if (rport) {
1759 			ret = __remoteport_unreg(nport, rport);
1760 			if (ret)
1761 				pr_warn("%s: Failed deleting remote port\n",
1762 					__func__);
1763 		}
1764 
1765 		fcloop_nport_put(nport);
1766 
1767 		spin_lock_irqsave(&fcloop_lock, flags);
1768 	}
1769 
1770 	for (;;) {
1771 		lport = list_first_entry_or_null(&fcloop_lports,
1772 						typeof(*lport), lport_list);
1773 		if (!lport || !fcloop_lport_get(lport))
1774 			break;
1775 
1776 		spin_unlock_irqrestore(&fcloop_lock, flags);
1777 
1778 		ret = __localport_unreg(lport);
1779 		if (ret)
1780 			pr_warn("%s: Failed deleting local port\n", __func__);
1781 
1782 		fcloop_lport_put(lport);
1783 
1784 		spin_lock_irqsave(&fcloop_lock, flags);
1785 	}
1786 
1787 	spin_unlock_irqrestore(&fcloop_lock, flags);
1788 
1789 	put_device(fcloop_device);
1790 
1791 	device_destroy(&fcloop_class, MKDEV(0, 0));
1792 	class_unregister(&fcloop_class);
1793 	kmem_cache_destroy(lsreq_cache);
1794 }
1795 
1796 module_init(fcloop_init);
1797 module_exit(fcloop_exit);
1798 
1799 MODULE_DESCRIPTION("NVMe target FC loop transport driver");
1800 MODULE_LICENSE("GPL v2");
1801